bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
author    David S. Miller <davem@davemloft.net>
          Mon, 27 Sep 2010 08:03:03 +0000 (01:03 -0700)
committer David S. Miller <davem@davemloft.net>
          Mon, 27 Sep 2010 08:03:03 +0000 (01:03 -0700)
Conflicts:
drivers/net/qlcnic/qlcnic_init.c
net/ipv4/ip_output.c

29 files changed:
MAINTAINERS
drivers/net/3c59x.c
drivers/net/atlx/atl1.c
drivers/net/bonding/bond_3ad.c
drivers/net/cxgb3/cxgb3_main.c
drivers/net/e1000e/netdev.c
drivers/net/ibm_newemac/core.c
drivers/net/netxen/netxen_nic_init.c
drivers/net/qlcnic/qlcnic_init.c
drivers/net/r8169.c
drivers/net/smsc911x.c
drivers/net/usb/hso.c
drivers/net/wireless/iwlwifi/iwl-core.c
include/net/addrconf.h
include/net/tcp.h
net/core/dev.c
net/core/sock.c
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv6/addrconf.c
net/ipv6/addrlabel.c
net/ipv6/ip6_output.c
net/rds/tcp_connect.c
net/rds/tcp_listen.c
net/rds/tcp_recv.c
net/rds/tcp_send.c
net/sctp/output.c

diff --combined MAINTAINERS
index 017bf493166e7877b1e8607324be674c9737efe2,44e659530910468ac4000ad8c0bb4cc0b75f3fd6..3168d0cbd358d328061d8914ec7939afb756b342
@@@ -1120,13 -1120,6 +1120,13 @@@ W:    http://wireless.kernel.org/en/users/
  S:    Maintained
  F:    drivers/net/wireless/ath/ar9170/
  
 +CARL9170 LINUX COMMUNITY WIRELESS DRIVER
 +M:    Christian Lamparter <chunkeey@googlemail.com>
 +L:    linux-wireless@vger.kernel.org
 +W:    http://wireless.kernel.org/en/users/Drivers/carl9170
 +S:    Maintained
 +F:    drivers/net/wireless/ath/carl9170/
 +
  ATK0110 HWMON DRIVER
  M:    Luca Tettamanti <kronos.it@gmail.com>
  L:    lm-sensors@lm-sensors.org
@@@ -1142,7 -1135,7 +1142,7 @@@ ATLX ETHERNET DRIVER
  M:    Jay Cliburn <jcliburn@gmail.com>
  M:    Chris Snook <chris.snook@gmail.com>
  M:    Jie Yang <jie.yang@atheros.com>
- L:    atl1-devel@lists.sourceforge.net
+ L:    netdev@vger.kernel.org
  W:    http://sourceforge.net/projects/atl1
  W:    http://atl1.sourceforge.net
  S:    Maintained
@@@ -1405,13 -1398,6 +1405,13 @@@ L:    linux-scsi@vger.kernel.or
  S:    Supported
  F:    drivers/scsi/bfa/
  
 +BROCADE BNA 10 GIGABIT ETHERNET DRIVER
 +M:    Rasesh Mody <rmody@brocade.com>
 +M:    Debashis Dutt <ddutt@brocade.com>
 +L:    netdev@vger.kernel.org
 +S:    Supported
 +F:    drivers/net/bna/
 +
  BSG (block layer generic sg v4 driver)
  M:    FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
  L:    linux-scsi@vger.kernel.org
@@@ -2895,12 -2881,6 +2895,12 @@@ M:    Brian King <brking@us.ibm.com
  S:    Supported
  F:    drivers/scsi/ipr.*
  
 +IBM Power Virtual Ethernet Device Driver
 +M:    Santiago Leon <santil@linux.vnet.ibm.com>
 +L:    netdev@vger.kernel.org
 +S:    Supported
 +F:    drivers/net/ibmveth.*
 +
  IBM ServeRAID RAID DRIVER
  P:    Jack Hammer
  M:    Dave Jeffery <ipslinux@adaptec.com>
@@@ -4348,12 -4328,13 +4348,12 @@@ F:   Documentation/filesystems/dlmfs.tx
  F:    fs/ocfs2/
  
  ORINOCO DRIVER
 -M:    Pavel Roskin <proski@gnu.org>
 -M:    David Gibson <hermes@gibson.dropbear.id.au>
  L:    linux-wireless@vger.kernel.org
  L:    orinoco-users@lists.sourceforge.net
  L:    orinoco-devel@lists.sourceforge.net
 +W:    http://linuxwireless.org/en/users/Drivers/orinoco
  W:    http://www.nongnu.org/orinoco/
 -S:    Maintained
 +S:    Orphan
  F:    drivers/net/wireless/orinoco/
  
  OSD LIBRARY and FILESYSTEM
@@@ -6419,7 -6400,7 +6419,7 @@@ S:      Maintaine
  F:    drivers/input/misc/wistron_btns.c
  
  WL1251 WIRELESS DRIVER
 -M:    Kalle Valo <kalle.valo@iki.fi>
 +M:    Kalle Valo <kvalo@adurom.com>
  L:    linux-wireless@vger.kernel.org
  W:    http://wireless.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
@@@ -6434,7 -6415,6 +6434,7 @@@ W:      http://wireless.kernel.or
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
  S:    Maintained
  F:    drivers/net/wireless/wl12xx/wl1271*
 +F:    include/linux/wl12xx.h
  
  WL3501 WIRELESS PCMCIA CARD DRIVER
  M:    Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
@@@ -6579,20 -6559,6 +6579,20 @@@ M:    "Maciej W. Rozycki" <macro@linux-mip
  S:    Maintained
  F:    drivers/serial/zs.*
  
 +GRE DEMULTIPLEXER DRIVER
 +M:    Dmitry Kozlov <xeb@mail.ru>
 +L:    netdev@vger.kernel.org
 +S:    Maintained
 +F:    net/ipv4/gre.c
 +F:    include/net/gre.h
 +
 +PPTP DRIVER
 +M:    Dmitry Kozlov <xeb@mail.ru>
 +L:    netdev@vger.kernel.org
 +S:    Maintained
 +F:    drivers/net/pptp.c
 +W:    http://sourceforge.net/projects/accel-pptp
 +
  THE REST
  M:    Linus Torvalds <torvalds@linux-foundation.org>
  L:    linux-kernel@vger.kernel.org
diff --combined drivers/net/3c59x.c
index e31a6d1919c6c10369df1b205d59ed13026ebeb8,fa42103b287429e2736f83e38b275c6d44a9b36c..ed964964fe1f55539db8a4c9b0fdb571fab93a8d
@@@ -635,6 -635,9 +635,9 @@@ struct vortex_private 
                must_free_region:1,                             /* Flag: if zero, Cardbus owns the I/O region */
                large_frames:1,                 /* accept large frames */
                handling_irq:1;                 /* private in_irq indicator */
+       /* {get|set}_wol operations are already serialized by rtnl.
+        * No additional locking is required around enable_wol and
+        * acpi_set_WOL().
+        */
        int drv_flags;
        u16 status_enable;
        u16 intr_enable;
@@@ -1739,7 -1742,7 +1742,7 @@@ vortex_open(struct net_device *dev
  
        /* Use the now-standard shared IRQ implementation. */
        if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
 -                              &boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev))) {
 +                              boomerang_interrupt : vortex_interrupt, IRQF_SHARED, dev->name, dev))) {
                pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
                goto err;
        }
@@@ -2939,13 -2942,11 +2942,11 @@@ static void vortex_get_wol(struct net_d
  {
        struct vortex_private *vp = netdev_priv(dev);
  
-       spin_lock_irq(&vp->lock);
        wol->supported = WAKE_MAGIC;
  
        wol->wolopts = 0;
        if (vp->enable_wol)
                wol->wolopts |= WAKE_MAGIC;
-       spin_unlock_irq(&vp->lock);
  }
  
  static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
  {
        struct vortex_private *vp = netdev_priv(dev);
  
        if (wol->wolopts & ~WAKE_MAGIC)
                return -EINVAL;
  
-       spin_lock_irq(&vp->lock);
        if (wol->wolopts & WAKE_MAGIC)
                vp->enable_wol = 1;
        else
                vp->enable_wol = 0;
        acpi_set_WOL(dev);
-       spin_unlock_irq(&vp->lock);
  
        return 0;
  }
diff --combined drivers/net/atlx/atl1.c
index 4ba6431deeefc96a21b968a5ec03e2e424ff57c1,c73be2848319deecd38a3d6f5e8e45ed8637e6f9..b8c053f768788fda4f9bbb0af404e471ded607b8
@@@ -1251,6 -1251,12 +1251,12 @@@ static void atl1_free_ring_resources(st
  
        rrd_ring->desc = NULL;
        rrd_ring->dma = 0;
+       adapter->cmb.dma = 0;
+       adapter->cmb.cmb = NULL;
+       adapter->smb.dma = 0;
+       adapter->smb.smb = NULL;
  }
  
  static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
@@@ -1805,7 -1811,7 +1811,7 @@@ static void atl1_rx_checksum(struct atl
         * the higher layers and let it be sorted out there.
         */
  
 -      skb->ip_summed = CHECKSUM_NONE;
 +      skb_checksum_none_assert(skb);
  
        if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
                if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
@@@ -2094,9 -2100,9 +2100,9 @@@ static u16 atl1_tpd_avail(struct atl1_t
  {
        u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
        u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
 -      return ((next_to_clean > next_to_use) ?
 +      return (next_to_clean > next_to_use) ?
                next_to_clean - next_to_use - 1 :
 -              tpd_ring->count + next_to_clean - next_to_use - 1);
 +              tpd_ring->count + next_to_clean - next_to_use - 1;
  }
  
  static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
@@@ -2847,10 -2853,11 +2853,11 @@@ static int atl1_resume(struct pci_dev *
        pci_enable_wake(pdev, PCI_D3cold, 0);
  
        atl1_reset_hw(&adapter->hw);
-       adapter->cmb.cmb->int_stats = 0;
  
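+       /* The CMB is only set up while the interface is running (it is
+        * freed and NULLed in atl1_free_ring_resources), so only touch
+        * its statistics block in that case. */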
-       if (netif_running(netdev))
+       if (netif_running(netdev)) {
+               adapter->cmb.cmb->int_stats = 0;
                atl1_up(adapter);
+       }
        netif_device_attach(netdev);
  
        return 0;
@@@ -3036,7 -3043,7 +3043,7 @@@ static int __devinit atl1_probe(struct 
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);
  
 -      setup_timer(&adapter->phy_config_timer, &atl1_phy_config,
 +      setup_timer(&adapter->phy_config_timer, atl1_phy_config,
                    (unsigned long)adapter);
        adapter->phy_timer_pending = false;
  
diff --combined drivers/net/bonding/bond_3ad.c
index 2a47c1deb9f09fa496f954eac4623f5fc0fe6b43,0ddf4c66afe21aa99679a3fb31b1e932dc51612c..079b9d1eead55adf080ee17d39c79fe99f85de4b
@@@ -252,7 -252,7 +252,7 @@@ static inline void __enable_port(struc
   */
  static inline int __port_is_enabled(struct port *port)
  {
 -      return(port->slave->state == BOND_STATE_ACTIVE);
 +      return port->slave->state == BOND_STATE_ACTIVE;
  }
  
  /**
@@@ -2466,6 -2466,9 +2466,9 @@@ int bond_3ad_lacpdu_recv(struct sk_buf
        if (!(dev->flags & IFF_MASTER))
                goto out;
  
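+       /* Make sure the full LACPDU header is in the linear skb data
+        * before the bonding code parses it. */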
+       if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
+               goto out;
        read_lock(&bond->lock);
        slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev),
                                        orig_dev);
diff --combined drivers/net/cxgb3/cxgb3_main.c
index 1ecf53dafe06c818c9e7ccd9802c0c8fb3d65a16,f208712c0b90d6b675f1ef1179f5825ae9f9f911..f9eede0a4b865e4a2c19805559d1b6bd94ded1fd
@@@ -1286,7 -1286,7 +1286,7 @@@ irq_err
  /*
   * Release resources when all the ports and offloading have been stopped.
   */
 -static void cxgb_down(struct adapter *adapter)
 +static void cxgb_down(struct adapter *adapter, int on_wq)
  {
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        free_irq_resources(adapter);
        quiesce_rx(adapter);
        t3_sge_stop(adapter);
 -      flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
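 +      /* A caller running from a work item on cxgb3_wq itself must not
 +       * flush that workqueue, as it would deadlock; it passes on_wq. */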
 +      if (!on_wq)
 +              flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */
  }
  
  static void schedule_chk_task(struct adapter *adap)
@@@ -1375,7 -1374,7 +1375,7 @@@ static int offload_close(struct t3cdev 
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
  
        if (!adapter->open_device_map)
 -              cxgb_down(adapter);
 +              cxgb_down(adapter, 0);
  
        cxgb3_offload_deactivate(adapter);
        return 0;
@@@ -1410,7 -1409,7 +1410,7 @@@ static int cxgb_open(struct net_device 
        return 0;
  }
  
 -static int cxgb_close(struct net_device *dev)
 +static int __cxgb_close(struct net_device *dev, int on_wq)
  {
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
                cancel_delayed_work_sync(&adapter->adap_check_task);
  
        if (!adapter->open_device_map)
 -              cxgb_down(adapter);
 +              cxgb_down(adapter, on_wq);
  
        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
        return 0;
  }
  
 +static int cxgb_close(struct net_device *dev)
 +{
 +      return __cxgb_close(dev, 0);
 +}
 +
  static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
  {
        struct port_info *pi = netdev_priv(dev);
@@@ -2302,6 -2296,8 +2302,8 @@@ static int cxgb_extension_ioctl(struct 
        case CHELSIO_GET_QSET_NUM:{
                struct ch_reg edata;
  
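+               /* Clear the reply struct so copy_to_user() below cannot
+                * leak uninitialized stack bytes. */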
+               memset(&edata, 0, sizeof(struct ch_reg));
                edata.cmd = CHELSIO_GET_QSET_NUM;
                edata.val = pi->nqsets;
                if (copy_to_user(useraddr, &edata, sizeof(edata)))
@@@ -2868,7 -2864,7 +2870,7 @@@ void t3_os_link_fault_handler(struct ad
        spin_unlock(&adapter->work_lock);
  }
  
 -static int t3_adapter_error(struct adapter *adapter, int reset)
 +static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
  {
        int i, ret = 0;
  
                struct net_device *netdev = adapter->port[i];
  
                if (netif_running(netdev))
 -                      cxgb_close(netdev);
 +                      __cxgb_close(netdev, on_wq);
        }
  
        /* Stop SGE timers */
@@@ -2954,7 -2950,7 +2956,7 @@@ static void fatal_error_task(struct wor
        int err = 0;
  
        rtnl_lock();
 -      err = t3_adapter_error(adapter, 1);
 +      err = t3_adapter_error(adapter, 1, 1);
        if (!err)
                err = t3_reenable_adapter(adapter);
        if (!err)
@@@ -3004,7 -3000,7 +3006,7 @@@ static pci_ers_result_t t3_io_error_det
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;
  
 -      ret = t3_adapter_error(adapter, 0);
 +      ret = t3_adapter_error(adapter, 0, 0);
  
        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
diff --combined drivers/net/e1000e/netdev.c
index 5d5850b556cf7d2dbe59ea37ec5b31b6409ea872,e561d15c3eb161558f9a7da3825f3cc6c90840c7..c69563c3ce96168bbe37d746d982204cc91d7323
@@@ -475,8 -475,7 +475,8 @@@ static void e1000_rx_checksum(struct e1
  {
        u16 status = (u16)status_err;
        u8 errors = (u8)(status_err >> 24);
 -      skb->ip_summed = CHECKSUM_NONE;
 +
 +      skb_checksum_none_assert(skb);
  
        /* Ignore Checksum bit is set */
        if (status & E1000_RXD_STAT_IXSM)
@@@ -1053,7 -1052,7 +1053,7 @@@ static bool e1000_clean_tx_irq(struct e
        adapter->total_tx_packets += total_tx_packets;
        netdev->stats.tx_bytes += total_tx_bytes;
        netdev->stats.tx_packets += total_tx_packets;
 -      return (count < tx_ring->count);
 +      return count < tx_ring->count;
  }
  
  /**
@@@ -2705,6 -2704,16 +2705,16 @@@ static void e1000_setup_rctl(struct e10
        u32 psrctl = 0;
        u32 pages = 0;
  
+       /* Workaround Si errata on 82579 - configure jumbo frame flow */
+       if (hw->mac.type == e1000_pch2lan) {
+               s32 ret_val;
+               if (adapter->netdev->mtu > ETH_DATA_LEN)
+                       ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
+               else
+                       ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+       }
        /* Program MC offset vector base */
        rctl = er32(RCTL);
        rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
                e1e_wphy(hw, 22, phy_data);
        }
  
-       /* Workaround Si errata on 82579 - configure jumbo frame flow */
-       if (hw->mac.type == e1000_pch2lan) {
-               s32 ret_val;
-               if (rctl & E1000_RCTL_LPE)
-                       ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
-               else
-                       ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
-       }
        /* Setup buffer sizes */
        rctl &= ~E1000_RCTL_SZ_4096;
        rctl |= E1000_RCTL_BSEX;
@@@ -3412,16 -3411,22 +3412,16 @@@ static int e1000_test_msi_interrupt(str
  
        if (adapter->flags & FLAG_MSI_TEST_FAILED) {
                adapter->int_mode = E1000E_INT_MODE_LEGACY;
 -              err = -EIO;
 -              e_info("MSI interrupt test failed!\n");
 -      }
 +              e_info("MSI interrupt test failed, using legacy interrupt.\n");
 +      } else
 +              e_dbg("MSI interrupt test succeeded!\n");
  
        free_irq(adapter->pdev->irq, netdev);
        pci_disable_msi(adapter->pdev);
  
 -      if (err == -EIO)
 -              goto msi_test_failed;
 -
 -      /* okay so the test worked, restore settings */
 -      e_dbg("MSI interrupt test succeeded!\n");
  msi_test_failed:
        e1000e_set_interrupt_capability(adapter);
 -      e1000_request_irq(adapter);
 -      return err;
 +      return e1000_request_irq(adapter);
  }
  
  /**
@@@ -3453,6 -3458,21 +3453,6 @@@ static int e1000_test_msi(struct e1000_
                pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
        }
  
 -      /* success ! */
 -      if (!err)
 -              return 0;
 -
 -      /* EIO means MSI test failed */
 -      if (err != -EIO)
 -              return err;
 -
 -      /* back to INTx mode */
 -      e_warn("MSI interrupt test failed, using legacy interrupt.\n");
 -
 -      e1000_free_irq(adapter);
 -
 -      err = e1000_request_irq(adapter);
 -
        return err;
  }
  
@@@ -4813,6 -4833,15 +4813,15 @@@ static int e1000_change_mtu(struct net_
                return -EINVAL;
        }
  
+       /* Jumbo frame workaround on 82579 requires CRC be stripped */
+       if ((adapter->hw.mac.type == e1000_pch2lan) &&
+           !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
+           (new_mtu > ETH_DATA_LEN)) {
+               e_err("Jumbo Frames not supported on 82579 when CRC "
+                     "stripping is disabled.\n");
+               return -EINVAL;
+       }
        /* 82573 Errata 17 */
        if (((adapter->hw.mac.type == e1000_82573) ||
             (adapter->hw.mac.type == e1000_82574)) &&
@@@ -5683,10 -5712,8 +5692,10 @@@ static int __devinit e1000_probe(struc
        netdev->vlan_features |= NETIF_F_HW_CSUM;
        netdev->vlan_features |= NETIF_F_SG;
  
 -      if (pci_using_dac)
 +      if (pci_using_dac) {
                netdev->features |= NETIF_F_HIGHDMA;
 +              netdev->vlan_features |= NETIF_F_HIGHDMA;
 +      }
  
        if (e1000e_enable_mng_pass_thru(&adapter->hw))
                adapter->flags |= FLAG_MNG_PT_ENABLED;
        }
  
        init_timer(&adapter->watchdog_timer);
 -      adapter->watchdog_timer.function = &e1000_watchdog;
 +      adapter->watchdog_timer.function = e1000_watchdog;
        adapter->watchdog_timer.data = (unsigned long) adapter;
  
        init_timer(&adapter->phy_info_timer);
 -      adapter->phy_info_timer.function = &e1000_update_phy_info;
 +      adapter->phy_info_timer.function = e1000_update_phy_info;
        adapter->phy_info_timer.data = (unsigned long) adapter;
  
        INIT_WORK(&adapter->reset_task, e1000_reset_task);
diff --combined drivers/net/ibm_newemac/core.c
index 3bdb4fa32064598774e4cf713b0f19b20f055880,519e19e23955a3c3a86d8e04f615b67daa7a996e..385dc3204cb7eefbbaa184f0601267abe7c392ee
@@@ -2095,11 -2095,11 +2095,11 @@@ static void *emac_dump_regs(struct emac
        if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
                hdr->version = EMAC4_ETHTOOL_REGS_VER;
                memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
 -              return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev));
 +              return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev);
        } else {
                hdr->version = EMAC_ETHTOOL_REGS_VER;
                memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
 -              return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev));
 +              return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev);
        }
  }
  
@@@ -2293,7 -2293,7 +2293,7 @@@ static int __devinit emac_check_deps(st
                if (deps[i].drvdata != NULL)
                        there++;
        }
 -      return (there == EMAC_DEP_COUNT);
 +      return there == EMAC_DEP_COUNT;
  }
  
  static void emac_put_deps(struct emac_instance *dev)
@@@ -2928,7 -2928,7 +2928,7 @@@ static int __devinit emac_probe(struct 
        if (dev->emac_irq != NO_IRQ)
                irq_dispose_mapping(dev->emac_irq);
   err_free:
-       kfree(ndev);
+       free_netdev(ndev);
   err_gone:
        /* if we were on the bootlist, remove us as we won't show up and
         * wake up all waiters to notify them in case they were waiting
@@@ -2971,7 -2971,7 +2971,7 @@@ static int __devexit emac_remove(struc
        if (dev->emac_irq != NO_IRQ)
                irq_dispose_mapping(dev->emac_irq);
  
-       kfree(dev->ndev);
+       free_netdev(dev->ndev);
  
        return 0;
  }
diff --combined drivers/net/netxen/netxen_nic_init.c
index 9b05e7da83ded453f8c480850961bed534146d27,b075a35b85d4ef09cfab1b2df3b42be0313c2516..a2d805aa75cd3a9698803d4358cbf1aa412f2330
@@@ -346,7 -346,7 +346,7 @@@ static u32 netxen_decode_crb_addr(u32 a
        if (pci_base == NETXEN_ADDR_ERROR)
                return pci_base;
        else
 -              return (pci_base + offset);
 +              return pci_base + offset;
  }
  
  #define NETXEN_MAX_ROM_WAIT_USEC      100
@@@ -1540,7 -1540,6 +1540,6 @@@ netxen_process_rcv(struct netxen_adapte
        if (pkt_offset)
                skb_pull(skb, pkt_offset);
  
-       skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, netdev);
  
        napi_gro_receive(&sds_ring->napi, skb);
@@@ -1602,8 -1601,6 +1601,6 @@@ netxen_process_lro(struct netxen_adapte
  
        skb_put(skb, lro_length + data_offset);
  
-       skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
        skb_pull(skb, l2_hdr_offset);
        skb->protocol = eth_type_trans(skb, netdev);
  
@@@ -1792,7 -1789,7 +1789,7 @@@ int netxen_process_cmd_ring(struct netx
        done = (sw_consumer == hw_consumer);
        spin_unlock(&adapter->tx_clean_lock);
  
 -      return (done);
 +      return done;
  }
  
  void
diff --combined drivers/net/qlcnic/qlcnic_init.c
index 16dd9ebd36c949404921aadd0976d9b0df9788a3,2c7cf0b64811ed72d4d6368c02d46fa966e7de2f..5c33d15c874ab679384b7fd5a6d40be9a9560c3f
@@@ -25,7 -25,6 +25,7 @@@
  #include <linux/netdevice.h>
  #include <linux/delay.h>
  #include <linux/slab.h>
 +#include <linux/if_vlan.h>
  #include "qlcnic.h"
  
  struct crb_addr_pair {
@@@ -46,9 -45,6 +46,9 @@@ static voi
  qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
                struct qlcnic_host_rds_ring *rds_ring);
  
 +static int
 +qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter);
 +
  static void crb_addr_transform_setup(void)
  {
        crb_addr_transform(XDMA);
@@@ -140,6 -136,8 +140,6 @@@ void qlcnic_reset_rx_buffers_list(struc
        for (ring = 0; ring < adapter->max_rds_rings; ring++) {
                rds_ring = &recv_ctx->rds_rings[ring];
  
 -              spin_lock(&rds_ring->lock);
 -
                INIT_LIST_HEAD(&rds_ring->free_list);
  
                rx_buf = rds_ring->rx_buf_arr;
                                        &rds_ring->free_list);
                        rx_buf++;
                }
 -
 -              spin_unlock(&rds_ring->lock);
        }
  }
  
@@@ -439,14 -439,11 +439,14 @@@ int qlcnic_pinit_from_rom(struct qlcnic
        u32 off;
        struct pci_dev *pdev = adapter->pdev;
  
 -      /* resetall */
 +      QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
 +      QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
 +
        qlcnic_rom_lock(adapter);
        QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
        qlcnic_rom_unlock(adapter);
  
 +      /* Init HW CRB block */
        if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
                        qlcnic_rom_fast_read(adapter, 4, &n) != 0) {
                dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n);
        }
        kfree(buf);
  
 -      /* p2dn replyCount */
 +      /* Initialize protocol process engine */
        QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e);
 -      /* disable_peg_cache 0 & 1*/
        QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8);
        QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8);
 -
 -      /* peg_clr_all */
        QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0);
        QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0);
        QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0);
        QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0);
        QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0);
        QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
 +      QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0);
 +      QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0);
 +      msleep(1);
 +      QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
 +      QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
 +      return 0;
 +}
 +
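 +/* Poll CRB_CMDPEG_STATE until the command PEG reports completion,
 + * giving up after QLCNIC_CMDPEG_CHECK_RETRY_COUNT polls. */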
 +static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
 +{
 +      u32 val;
 +      int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
 +
 +      do {
 +              val = QLCRD32(adapter, CRB_CMDPEG_STATE);
 +
 +              switch (val) {
 +              case PHAN_INITIALIZE_COMPLETE:
 +              case PHAN_INITIALIZE_ACK:
 +                      return 0;
 +              case PHAN_INITIALIZE_FAILED:
 +                      goto out_err;
 +              default:
 +                      break;
 +              }
 +
 +              msleep(QLCNIC_CMDPEG_CHECK_DELAY);
 +
 +      } while (--retries);
 +
 +      QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
 +
 +out_err:
 +      dev_err(&adapter->pdev->dev, "Command Peg initialization not "
 +                    "complete, state: 0x%x.\n", val);
 +      return -EIO;
 +}
 +
 +static int
 +qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
 +{
 +      u32 val;
 +      int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT;
 +
 +      do {
 +              val = QLCRD32(adapter, CRB_RCVPEG_STATE);
 +
 +              if (val == PHAN_PEG_RCV_INITIALIZED)
 +                      return 0;
 +
 +              msleep(QLCNIC_RCVPEG_CHECK_DELAY);
 +
 +      } while (--retries);
 +
 +      if (!retries) {
 +              dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
 +                            "complete, state: 0x%x.\n", val);
 +              return -EIO;
 +      }
 +
        return 0;
  }
  
 +int
 +qlcnic_check_fw_status(struct qlcnic_adapter *adapter)
 +{
 +      int err;
 +
 +      err = qlcnic_cmd_peg_ready(adapter);
 +      if (err)
 +              return err;
 +
 +      err = qlcnic_receive_peg_ready(adapter);
 +      if (err)
 +              return err;
 +
 +      QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
 +
 +      return err;
 +}
 +
  int
  qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
  
        }
        adapter->physical_port = (val >> 2);
        if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
 -              timeo = 30;
 +              timeo = QLCNIC_INIT_TIMEOUT_SECS;
  
        adapter->dev_init_timeo = timeo;
  
        if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo))
 -              timeo = 10;
 +              timeo = QLCNIC_RESET_TIMEOUT_SECS;
  
        adapter->reset_ack_timeo = timeo;
  
@@@ -984,47 -906,54 +984,47 @@@ qlcnic_get_bios_version(struct qlcnic_a
        return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
  }
  
 -int
 -qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
 +static void qlcnic_rom_lock_recovery(struct qlcnic_adapter *adapter)
  {
 -      u32 count, old_count;
 -      u32 val, version, major, minor, build;
 -      int i, timeout;
 -
 -      if (adapter->need_fw_reset)
 -              return 1;
 +      if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID))
 +              dev_info(&adapter->pdev->dev, "Resetting rom_lock\n");
  
 -      /* last attempt had failed */
 -      if (QLCRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
 -              return 1;
 +      qlcnic_pcie_sem_unlock(adapter, 2);
 +}
  
 -      old_count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
 +static int
 +qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter)
 +{
 +      u32 heartbeat, ret = -EIO;
 +      int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
  
 -      for (i = 0; i < 10; i++) {
 +      adapter->heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
  
 -              timeout = msleep_interruptible(200);
 -              if (timeout) {
 -                      QLCWR32(adapter, CRB_CMDPEG_STATE,
 -                                      PHAN_INITIALIZE_FAILED);
 -                      return -EINTR;
 +      do {
 +              msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
 +              heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
 +              if (heartbeat != adapter->heartbeat) {
 +                      ret = QLCNIC_RCODE_SUCCESS;
 +                      break;
                }
 +      } while (--retries);
  
 -              count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
 -              if (count != old_count)
 -                      break;
 -      }
 +      return ret;
 +}
  
 -      /* firmware is dead */
 -      if (count == old_count)
 +int
 +qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
 +{
 +      if (qlcnic_check_fw_hearbeat(adapter)) {
 +              qlcnic_rom_lock_recovery(adapter);
                return 1;
 +      }
  
 -      /* check if we have got newer or different file firmware */
 -      if (adapter->fw) {
 -
 -              val = qlcnic_get_fw_version(adapter);
 -
 -              version = QLCNIC_DECODE_VERSION(val);
 -
 -              major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
 -              minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
 -              build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
 +      if (adapter->need_fw_reset)
 +              return 1;
  
 -              if (version > QLCNIC_VERSION_CODE(major, minor, build))
 -                      return 1;
 -      }
 +      if (adapter->fw)
 +              return 1;
  
        return 0;
  }
@@@ -1160,6 -1089,18 +1160,6 @@@ qlcnic_validate_firmware(struct qlcnic_
                return -EINVAL;
        }
  
 -      /* check if flashed firmware is newer */
 -      if (qlcnic_rom_fast_read(adapter,
 -                      QLCNIC_FW_VERSION_OFFSET, (int *)&val))
 -              return -EIO;
 -
 -      val = QLCNIC_DECODE_VERSION(val);
 -      if (val > ver) {
 -              dev_info(&pdev->dev, "%s: firmware is older than flash\n",
 -                              fw_name[fw_type]);
 -              return -EINVAL;
 -      }
 -
        QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
        return 0;
  }
@@@ -1221,6 -1162,78 +1221,6 @@@ qlcnic_release_firmware(struct qlcnic_a
        adapter->fw = NULL;
  }
  
 -static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
 -{
 -      u32 val;
 -      int retries = 60;
 -
 -      do {
 -              val = QLCRD32(adapter, CRB_CMDPEG_STATE);
 -
 -              switch (val) {
 -              case PHAN_INITIALIZE_COMPLETE:
 -              case PHAN_INITIALIZE_ACK:
 -                      return 0;
 -              case PHAN_INITIALIZE_FAILED:
 -                      goto out_err;
 -              default:
 -                      break;
 -              }
 -
 -              msleep(500);
 -
 -      } while (--retries);
 -
 -      QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
 -
 -out_err:
 -      dev_err(&adapter->pdev->dev, "Command Peg initialization not "
 -                    "complete, state: 0x%x.\n", val);
 -      return -EIO;
 -}
 -
 -static int
 -qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
 -{
 -      u32 val;
 -      int retries = 2000;
 -
 -      do {
 -              val = QLCRD32(adapter, CRB_RCVPEG_STATE);
 -
 -              if (val == PHAN_PEG_RCV_INITIALIZED)
 -                      return 0;
 -
 -              msleep(10);
 -
 -      } while (--retries);
 -
 -      if (!retries) {
 -              dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
 -                            "complete, state: 0x%x.\n", val);
 -              return -EIO;
 -      }
 -
 -      return 0;
 -}
 -
 -int qlcnic_init_firmware(struct qlcnic_adapter *adapter)
 -{
 -      int err;
 -
 -      err = qlcnic_cmd_peg_ready(adapter);
 -      if (err)
 -              return err;
 -
 -      err = qlcnic_receive_peg_ready(adapter);
 -      if (err)
 -              return err;
 -
 -      QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
 -
 -      return err;
 -}
 -
  static void
  qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
                                struct qlcnic_fw_msg *msg)
@@@ -1303,7 -1316,7 +1303,7 @@@ qlcnic_alloc_rx_skb(struct qlcnic_adapt
                return -ENOMEM;
        }
  
-       skb_reserve(skb, 2);
+       skb_reserve(skb, NET_IP_ALIGN);
  
        dma = pci_map_single(pdev, skb->data,
                        rds_ring->dma_size, PCI_DMA_FROMDEVICE);
@@@ -1338,12 -1351,11 +1338,12 @@@ static struct sk_buff *qlcnic_process_r
  
        skb = buffer->skb;
  
 -      if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
 +      if (likely(adapter->rx_csum && (cksum == STATUS_CKSUM_OK ||
 +                                              cksum == STATUS_CKSUM_LOOP))) {
                adapter->stats.csummed++;
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else {
 -              skb->ip_summed = CHECKSUM_NONE;
 +              skb_checksum_none_assert(skb);
        }
  
        skb->dev = adapter->netdev;
        return skb;
  }
  
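 +/* Strip any 802.1Q header from the skb; when a PVID is configured,
 + * frames whose tag differs from it are rejected unless tagging is
 + * enabled on the adapter. */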
 +static int
 +qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb,
 +                      u16 *vlan_tag)
 +{
 +      struct ethhdr *eth_hdr;
 +
 +      if (!__vlan_get_tag(skb, vlan_tag)) {
 +              eth_hdr = (struct ethhdr *) skb->data;
 +              memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
 +              skb_pull(skb, VLAN_HLEN);
 +      }
 +      if (!adapter->pvid)
 +              return 0;
 +
 +      if (*vlan_tag == adapter->pvid) {
 +              /* Outer vlan tag. Packet should follow non-vlan path */
 +              *vlan_tag = 0xffff;
 +              return 0;
 +      }
 +      if (adapter->flags & QLCNIC_TAGGING_ENABLED)
 +              return 0;
 +
 +      return -EINVAL;
 +}
 +
  static struct qlcnic_rx_buffer *
  qlcnic_process_rcv(struct qlcnic_adapter *adapter,
                struct qlcnic_host_sds_ring *sds_ring,
        struct sk_buff *skb;
        struct qlcnic_host_rds_ring *rds_ring;
        int index, length, cksum, pkt_offset;
 +      u16 vid = 0xffff;
  
        if (unlikely(ring >= adapter->max_rds_rings))
                return NULL;
        if (pkt_offset)
                skb_pull(skb, pkt_offset);
  
 +      if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
 +              adapter->stats.rxdropped++;
 +              dev_kfree_skb(skb);
 +              return buffer;
 +      }
 +
        skb->protocol = eth_type_trans(skb, netdev);
  
 -      napi_gro_receive(&sds_ring->napi, skb);
 +      if ((vid != 0xffff) && adapter->vlgrp)
 +              vlan_gro_receive(&sds_ring->napi, adapter->vlgrp, vid, skb);
 +      else
 +              napi_gro_receive(&sds_ring->napi, skb);
  
        adapter->stats.rx_pkts++;
        adapter->stats.rxbytes += length;
@@@ -1458,7 -1435,6 +1458,7 @@@ qlcnic_process_lro(struct qlcnic_adapte
        int index;
        u16 lro_length, length, data_offset;
        u32 seq_number;
 +      u16 vid = 0xffff;
  
        if (unlikely(ring > adapter->max_rds_rings))
                return NULL;
        skb_put(skb, lro_length + data_offset);
  
        skb_pull(skb, l2_hdr_offset);
 +
 +      if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
 +              adapter->stats.rxdropped++;
 +              dev_kfree_skb(skb);
 +              return buffer;
 +      }
 +
        skb->protocol = eth_type_trans(skb, netdev);
  
        iph = (struct iphdr *)skb->data;
  
        length = skb->len;
  
 -      netif_receive_skb(skb);
 +      if ((vid != 0xffff) && adapter->vlgrp)
 +              vlan_hwaccel_receive_skb(skb, adapter->vlgrp, vid);
 +      else
 +              netif_receive_skb(skb);
  
        adapter->stats.lro_pkts++;
        adapter->stats.lrobytes += length;
@@@ -1618,6 -1584,8 +1618,6 @@@ qlcnic_post_rx_buffers(struct qlcnic_ad
        int producer, count = 0;
        struct list_head *head;
  
 -      spin_lock(&rds_ring->lock);
 -
        producer = rds_ring->producer;
  
        head = &rds_ring->free_list;
                writel((producer-1) & (rds_ring->num_desc-1),
                                rds_ring->crb_rcv_producer);
        }
 -      spin_unlock(&rds_ring->lock);
  }
  
  static void
diff --combined drivers/net/r8169.c
index 54900332f12d87faf40386505b6f150fab86b1d0,a0da4a17b025ce08469659a420d37944861876d3..fe3b7622fba05445d265e26ed343e71626e89856
@@@ -1076,12 -1076,7 +1076,12 @@@ static int rtl8169_rx_vlan_skb(struct r
        int ret;
  
        if (vlgrp && (opts2 & RxVlanTag)) {
 -              __vlan_hwaccel_rx(skb, vlgrp, swab16(opts2 & 0xffff), polling);
 +              u16 vtag = swab16(opts2 & 0xffff);
 +
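 +              /* GRO may only be used from NAPI (polling) context. */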
 +              if (likely(polling))
 +                      vlan_gro_receive(&tp->napi, vlgrp, vtag, skb);
 +              else
 +                      __vlan_hwaccel_rx(skb, vlgrp, vtag, polling);
                ret = 0;
        } else
                ret = -1;
@@@ -2939,7 -2934,7 +2939,7 @@@ static const struct rtl_cfg_info 
                .hw_start       = rtl_hw_start_8168,
                .region         = 2,
                .align          = 8,
-               .intr_event     = SYSErr | LinkChg | RxOverflow |
+               .intr_event     = SYSErr | RxFIFOOver | LinkChg | RxOverflow |
                                  TxErr | TxOK | RxOK | RxErr,
                .napi_event     = TxErr | TxOK | RxOK | RxOverflow,
                .features       = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
@@@ -3191,7 -3186,6 +3191,7 @@@ rtl8169_init_one(struct pci_dev *pdev, 
  #ifdef CONFIG_R8169_VLAN
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  #endif
 +      dev->features |= NETIF_F_GRO;
  
        tp->intr_mask = 0xffff;
        tp->align = cfg->align;
@@@ -4456,8 -4450,9 +4456,8 @@@ static inline int rtl8169_fragmented_fr
        return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
  }
  
 -static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc)
 +static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
  {
 -      u32 opts1 = le32_to_cpu(desc->opts1);
        u32 status = opts1 & RxProtoMask;
  
        if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
            ((status == RxProtoIP) && !(opts1 & IPFail)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
 -              skb->ip_summed = CHECKSUM_NONE;
 +              skb_checksum_none_assert(skb);
  }
  
  static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff,
@@@ -4551,6 -4546,8 +4551,6 @@@ static int rtl8169_rx_interrupt(struct 
                                continue;
                        }
  
 -                      rtl8169_rx_csum(skb, desc);
 -
                        if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) {
                                pci_dma_sync_single_for_device(pdev, addr,
                                        pkt_size, PCI_DMA_FROMDEVICE);
                                tp->Rx_skbuff[entry] = NULL;
                        }
  
 +                      rtl8169_rx_csum(skb, status);
                        skb_put(skb, pkt_size);
                        skb->protocol = eth_type_trans(skb, dev);
  
                        if (rtl8169_rx_vlan_skb(tp, desc, skb, polling) < 0) {
                                if (likely(polling))
 -                                      netif_receive_skb(skb);
 +                                      napi_gro_receive(&tp->napi, skb);
                                else
                                        netif_rx(skb);
                        }
@@@ -4629,8 -4625,7 +4629,7 @@@ static irqreturn_t rtl8169_interrupt(in
                }
  
                /* Work around for rx fifo overflow */
-               if (unlikely(status & RxFIFOOver) &&
-               (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
+               if (unlikely(status & RxFIFOOver)) {
                        netif_stop_queue(dev);
                        rtl8169_tx_timeout(dev);
                        break;
diff --combined drivers/net/smsc911x.c
index 13ddcd4872005830f09bdf3ee9c02c5e7d572eb6,8150ba1541161ab2e7781ade13be970540f835ec..a8e5856ce8821b4f441173be994d90bd021b9bcb
@@@ -58,6 -58,7 +58,7 @@@
  
  MODULE_LICENSE("GPL");
  MODULE_VERSION(SMSC_DRV_VERSION);
+ MODULE_ALIAS("platform:smsc911x");
  
  #if USE_DEBUG > 0
  static int debug = 16;
@@@ -1048,7 -1049,7 +1049,7 @@@ static int smsc911x_poll(struct napi_st
                smsc911x_rx_readfifo(pdata, (unsigned int *)skb->head,
                                     pktwords);
                skb->protocol = eth_type_trans(skb, dev);
 -              skb->ip_summed = CHECKSUM_NONE;
 +              skb_checksum_none_assert(skb);
                netif_receive_skb(skb);
  
                /* Update counters */
diff --combined drivers/net/usb/hso.c
index 4f123f869bdc8a26d97a90d5d8622d15dd366db9,1cd752f9a6e1e8586f3eda9fcb854ac0226de67a..8110595fbbcc4ca8b9b355ad02f9eef507c1b781
@@@ -843,7 -843,16 +843,7 @@@ static netdev_tx_t hso_net_start_xmit(s
        return NETDEV_TX_OK;
  }
  
 -static void hso_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
 -{
 -      struct hso_net *odev = netdev_priv(net);
 -
 -      strncpy(info->driver, driver_name, ETHTOOL_BUSINFO_LEN);
 -      usb_make_path(odev->parent->usb, info->bus_info, sizeof info->bus_info);
 -}
 -
  static const struct ethtool_ops ops = {
 -      .get_drvinfo = hso_get_drvinfo,
        .get_link = ethtool_op_get_link
  };
  
@@@ -1643,6 -1652,8 +1643,8 @@@ static int hso_get_count(struct hso_ser
        struct uart_icount cnow;
        struct hso_tiocmget  *tiocmget = serial->tiocmget;
  
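+       /* Zero icount so no uninitialized stack data reaches user space. */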
+       memset(&icount, 0, sizeof(struct serial_icounter_struct));
        if (!tiocmget)
                 return -ENOENT;
        spin_lock_irq(&serial->serial_lock);
diff --combined drivers/net/wireless/iwlwifi/iwl-core.c
index 393f02d94c4e324a2439ee387e6117240c9d9e67,e23c4060a0f093e966ca355af1d466880f65b0ee..5c568933ce48f521e24ef868b70f6e7c74d47c7b
@@@ -64,8 -64,7 +64,8 @@@ MODULE_LICENSE("GPL")
   *
   * default: bt_coex_active = true (BT_COEX_ENABLE)
   */
 -static bool bt_coex_active = true;
 +bool bt_coex_active = true;
 +EXPORT_SYMBOL_GPL(bt_coex_active);
  module_param(bt_coex_active, bool, S_IRUGO);
  MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
  
@@@ -147,10 -146,6 +147,10 @@@ u8 iwl_toggle_tx_ant(struct iwl_priv *p
        int i;
        u8 ind = ant;
  
 +      if (priv->band == IEEE80211_BAND_2GHZ &&
 +          priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
 +              return 0;
 +
        for (i = 0; i < RATE_ANT_NUM - 1; i++) {
                ind = (ind + 1) < RATE_ANT_NUM ?  ind + 1 : 0;
                if (valid & BIT(ind))
@@@ -188,33 -183,38 +188,33 @@@ out
  }
  EXPORT_SYMBOL(iwl_alloc_all);
  
 -void iwl_hw_detect(struct iwl_priv *priv)
 -{
 -      priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
 -      priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
 -      pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
 -}
 -EXPORT_SYMBOL(iwl_hw_detect);
 -
  /*
   * QoS  support
  */
 -static void iwl_update_qos(struct iwl_priv *priv)
 +static void iwl_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
  {
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;
  
 -      priv->qos_data.def_qos_parm.qos_flags = 0;
 +      if (!ctx->is_active)
 +              return;
 +
 +      ctx->qos_data.def_qos_parm.qos_flags = 0;
  
 -      if (priv->qos_data.qos_active)
 -              priv->qos_data.def_qos_parm.qos_flags |=
 +      if (ctx->qos_data.qos_active)
 +              ctx->qos_data.def_qos_parm.qos_flags |=
                        QOS_PARAM_FLG_UPDATE_EDCA_MSK;
  
 -      if (priv->current_ht_config.is_ht)
 -              priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
 +      if (ctx->ht.enabled)
 +              ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
  
        IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
 -                    priv->qos_data.qos_active,
 -                    priv->qos_data.def_qos_parm.qos_flags);
 +                    ctx->qos_data.qos_active,
 +                    ctx->qos_data.def_qos_parm.qos_flags);
  
 -      iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM,
 +      iwl_send_cmd_pdu_async(priv, ctx->qos_cmd,
                               sizeof(struct iwl_qosparam_cmd),
 -                             &priv->qos_data.def_qos_parm, NULL);
 +                             &ctx->qos_data.def_qos_parm, NULL);
  }
  
  #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
@@@ -247,11 -247,7 +247,11 @@@ static void iwlcore_init_ht_hw_capab(co
                ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
  
        ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
 +      if (priv->cfg->ampdu_factor)
 +              ht_info->ampdu_factor = priv->cfg->ampdu_factor;
        ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
 +      if (priv->cfg->ampdu_density)
 +              ht_info->ampdu_density = priv->cfg->ampdu_density;
  
        ht_info->mcs.rx_mask[0] = 0xFF;
        if (rx_chains_num >= 2)
@@@ -444,15 -440,15 +444,15 @@@ static bool is_single_rx_stream(struct 
               priv->current_ht_config.single_chain_sufficient;
  }
  
 -static u8 iwl_is_channel_extension(struct iwl_priv *priv,
 -                                 enum ieee80211_band band,
 -                                 u16 channel, u8 extension_chan_offset)
 +static bool iwl_is_channel_extension(struct iwl_priv *priv,
 +                                   enum ieee80211_band band,
 +                                   u16 channel, u8 extension_chan_offset)
  {
        const struct iwl_channel_info *ch_info;
  
        ch_info = iwl_get_channel_info(priv, band, channel);
        if (!is_channel_valid(ch_info))
 -              return 0;
 +              return false;
  
        if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
                return !(ch_info->ht40_extension_channel &
                                        IEEE80211_CHAN_NO_HT40PLUS);
        if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
                return !(ch_info->ht40_extension_channel &
                                        IEEE80211_CHAN_NO_HT40MINUS);
  
 -      return 0;
 +      return false;
  }
  
 -u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
 -                       struct ieee80211_sta_ht_cap *sta_ht_inf)
 +bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
 +                          struct iwl_rxon_context *ctx,
 +                          struct ieee80211_sta_ht_cap *ht_cap)
  {
 -      struct iwl_ht_config *ht_conf = &priv->current_ht_config;
 -
 -      if (!ht_conf->is_ht || !ht_conf->is_40mhz)
 -              return 0;
 +      if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
 +              return false;
  
 -      /* We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
 +      /*
 +       * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
         * the bit will not set if it is pure 40MHz case
         */
 -      if (sta_ht_inf) {
 -              if (!sta_ht_inf->ht_supported)
 -                      return 0;
 -      }
 +      if (ht_cap && !ht_cap->ht_supported)
 +              return false;
 +
  #ifdef CONFIG_IWLWIFI_DEBUGFS
        if (priv->disable_ht40)
 -              return 0;
 +              return false;
  #endif
 +
        return iwl_is_channel_extension(priv, priv->band,
 -                      le16_to_cpu(priv->staging_rxon.channel),
 -                      ht_conf->extension_chan_offset);
 +                      le16_to_cpu(ctx->staging.channel),
 +                      ctx->ht.extension_chan_offset);
  }
  EXPORT_SYMBOL(iwl_is_ht40_tx_allowed);
  
  static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
  {
 -      u16 new_val = 0;
 -      u16 beacon_factor = 0;
 +      u16 new_val;
 +      u16 beacon_factor;
 +
 +      /*
 +       * If mac80211 hasn't given us a beacon interval, program
 +       * the default into the device (not checking this here
 +       * would cause the adjustment below to return the maximum
 +       * value, which may break PAN.)
 +       */
 +      if (!beacon_val)
 +              return DEFAULT_BEACON_INTERVAL;
 +
 +      /*
 +       * If the beacon interval we obtained from the peer
 +       * is too large, we'll have to wake up more often
 +       * (and in IBSS case, we'll beacon too much)
 +       *
 +       * For example, if max_beacon_val is 4096, and the
 +       * requested beacon interval is 7000, we'll have to
 +       * use 3500 to be able to wake up on the beacons.
 +       *
 +       * This could badly influence beacon detection stats.
 +       */
  
        beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
        new_val = beacon_val / beacon_factor;
        return new_val;
  }
  
 -void iwl_setup_rxon_timing(struct iwl_priv *priv, struct ieee80211_vif *vif)
 +int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
  {
        u64 tsf;
        s32 interval_tm, rem;
 -      unsigned long flags;
        struct ieee80211_conf *conf = NULL;
        u16 beacon_int;
 +      struct ieee80211_vif *vif = ctx->vif;
  
        conf = ieee80211_get_hw_conf(priv->hw);
  
 -      spin_lock_irqsave(&priv->lock, flags);
 -      priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp);
 -      priv->rxon_timing.listen_interval = cpu_to_le16(conf->listen_interval);
 +      lockdep_assert_held(&priv->mutex);
  
 -      beacon_int = vif->bss_conf.beacon_int;
 +      memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
  
 -      if (vif->type == NL80211_IFTYPE_ADHOC) {
 -              /* TODO: we need to get atim_window from upper stack
 -               * for now we set to 0 */
 -              priv->rxon_timing.atim_window = 0;
 -      } else {
 -              priv->rxon_timing.atim_window = 0;
 -      }
 +      ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
 +      ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
  
 -      beacon_int = iwl_adjust_beacon_interval(beacon_int,
 +      beacon_int = vif ? vif->bss_conf.beacon_int : 0;
 +
 +      /*
 +       * TODO: For IBSS we need to get atim_window from mac80211,
 +       *       for now just always use 0
 +       */
 +      ctx->timing.atim_window = 0;
 +
 +      if (ctx->ctxid == IWL_RXON_CTX_PAN &&
 +          (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) &&
 +          iwl_is_associated(priv, IWL_RXON_CTX_BSS) &&
 +          priv->contexts[IWL_RXON_CTX_BSS].vif &&
 +          priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) {
 +              ctx->timing.beacon_interval =
 +                      priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
 +              beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
 +      } else if (ctx->ctxid == IWL_RXON_CTX_BSS &&
 +                 iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
 +                 priv->contexts[IWL_RXON_CTX_PAN].vif &&
 +                 priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int &&
 +                 (!iwl_is_associated_ctx(ctx) || !ctx->vif ||
 +                  !ctx->vif->bss_conf.beacon_int)) {
 +              ctx->timing.beacon_interval =
 +                      priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval;
 +              beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
 +      } else {
 +              beacon_int = iwl_adjust_beacon_interval(beacon_int,
                                priv->hw_params.max_beacon_itrvl * TIME_UNIT);
 -      priv->rxon_timing.beacon_interval = cpu_to_le16(beacon_int);
 +              ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
 +      }
  
        tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */
        interval_tm = beacon_int * TIME_UNIT;
        rem = do_div(tsf, interval_tm);
 -      priv->rxon_timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
 +      ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
 +
 +      ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
  
 -      spin_unlock_irqrestore(&priv->lock, flags);
        IWL_DEBUG_ASSOC(priv,
                        "beacon interval %d beacon timer %d beacon tim %d\n",
 -                      le16_to_cpu(priv->rxon_timing.beacon_interval),
 -                      le32_to_cpu(priv->rxon_timing.beacon_init_val),
 -                      le16_to_cpu(priv->rxon_timing.atim_window));
 +                      le16_to_cpu(ctx->timing.beacon_interval),
 +                      le32_to_cpu(ctx->timing.beacon_init_val),
 +                      le16_to_cpu(ctx->timing.atim_window));
 +
 +      return iwl_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
 +                              sizeof(ctx->timing), &ctx->timing);
  }
 -EXPORT_SYMBOL(iwl_setup_rxon_timing);
 +EXPORT_SYMBOL(iwl_send_rxon_timing);
  
 -void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
 +void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 +                         int hw_decrypt)
  {
 -      struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
 +      struct iwl_rxon_cmd *rxon = &ctx->staging;
  
        if (hw_decrypt)
                rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
@@@ -610,11 -560,11 +610,11 @@@ EXPORT_SYMBOL(iwl_set_rxon_hwcrypto)
   * be #ifdef'd out once the driver is stable and folks aren't actively
   * making changes
   */
 -int iwl_check_rxon_cmd(struct iwl_priv *priv)
 +int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
  {
        int error = 0;
        int counter = 1;
 -      struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
 +      struct iwl_rxon_cmd *rxon = &ctx->staging;
  
        if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
                error |= le32_to_cpu(rxon->flags &
@@@ -686,83 -636,66 +686,83 @@@ EXPORT_SYMBOL(iwl_check_rxon_cmd)
   * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
   * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
   */
 -int iwl_full_rxon_required(struct iwl_priv *priv)
 +int iwl_full_rxon_required(struct iwl_priv *priv,
 +                         struct iwl_rxon_context *ctx)
  {
 +      const struct iwl_rxon_cmd *staging = &ctx->staging;
 +      const struct iwl_rxon_cmd *active = &ctx->active;
 +
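 +      /* Any mismatch caught by the checks below cannot be applied via
 +       * RXON_ASSOC alone and therefore forces a full RXON command. */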
 +#define CHK(cond)                                                     \
 +      if ((cond)) {                                                   \
 +              IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n");   \
 +              return 1;                                               \
 +      }
 +
 +#define CHK_NEQ(c1, c2)                                               \
 +      if ((c1) != (c2)) {                                     \
 +              IWL_DEBUG_INFO(priv, "need full RXON - "        \
 +                             #c1 " != " #c2 " - %d != %d\n",  \
 +                             (c1), (c2));                     \
 +              return 1;                                       \
 +      }
  
        /* These items are only settable from the full RXON command */
 -      if (!(iwl_is_associated(priv)) ||
 -          compare_ether_addr(priv->staging_rxon.bssid_addr,
 -                             priv->active_rxon.bssid_addr) ||
 -          compare_ether_addr(priv->staging_rxon.node_addr,
 -                             priv->active_rxon.node_addr) ||
 -          compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
 -                             priv->active_rxon.wlap_bssid_addr) ||
 -          (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
 -          (priv->staging_rxon.channel != priv->active_rxon.channel) ||
 -          (priv->staging_rxon.air_propagation !=
 -           priv->active_rxon.air_propagation) ||
 -          (priv->staging_rxon.ofdm_ht_single_stream_basic_rates !=
 -           priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
 -          (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
 -           priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
 -          (priv->staging_rxon.ofdm_ht_triple_stream_basic_rates !=
 -           priv->active_rxon.ofdm_ht_triple_stream_basic_rates) ||
 -          (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
 -              return 1;
 +      CHK(!iwl_is_associated_ctx(ctx));
 +      CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
 +      CHK(compare_ether_addr(staging->node_addr, active->node_addr));
 +      CHK(compare_ether_addr(staging->wlap_bssid_addr,
 +                              active->wlap_bssid_addr));
 +      CHK_NEQ(staging->dev_type, active->dev_type);
 +      CHK_NEQ(staging->channel, active->channel);
 +      CHK_NEQ(staging->air_propagation, active->air_propagation);
 +      CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
 +              active->ofdm_ht_single_stream_basic_rates);
 +      CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
 +              active->ofdm_ht_dual_stream_basic_rates);
 +      CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates,
 +              active->ofdm_ht_triple_stream_basic_rates);
 +      CHK_NEQ(staging->assoc_id, active->assoc_id);
  
        /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
         * be updated with the RXON_ASSOC command -- however only some
         * flag transitions are allowed using RXON_ASSOC */
  
        /* Check if we are not switching bands */
 -      if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
 -          (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
 -              return 1;
 +      CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
 +              active->flags & RXON_FLG_BAND_24G_MSK);
  
        /* Check if we are switching association toggle */
 -      if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
 -              (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
 -              return 1;
 +      CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
 +              active->filter_flags & RXON_FILTER_ASSOC_MSK);
 +
 +#undef CHK
 +#undef CHK_NEQ
  
        return 0;
  }
  EXPORT_SYMBOL(iwl_full_rxon_required);
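
The CHK/CHK_NEQ helpers introduced above use the preprocessor's stringizing operator (#cond) so each early return logs the exact condition that forced a full RXON, replacing one opaque multi-line if. A minimal standalone sketch of the same pattern, with printf standing in for IWL_DEBUG_INFO:

#include <stdio.h>

/* Stringize the tested expression so the log names the failing check. */
#define CHK(cond)                                              \
	if ((cond)) {                                          \
		printf("need full RXON - " #cond "\n");        \
		return 1;                                      \
	}

static int needs_full_rxon(int associated, int staging_ch, int active_ch)
{
	CHK(!associated);             /* logs: need full RXON - !associated */
	CHK(staging_ch != active_ch);
	return 0;
}

int main(void)
{
	return needs_full_rxon(1, 6, 11); /* logs the second check, returns 1 */
}
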
  
 -u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv)
 +u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
 +                          struct iwl_rxon_context *ctx)
  {
        /*
         * Assign the lowest rate -- should really get this from
         * the beacon skb from mac80211.
         */
 -      if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
 +      if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
                return IWL_RATE_1M_PLCP;
        else
                return IWL_RATE_6M_PLCP;
  }
  EXPORT_SYMBOL(iwl_rate_get_lowest_plcp);
  
 -void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
 +static void _iwl_set_rxon_ht(struct iwl_priv *priv,
 +                           struct iwl_ht_config *ht_conf,
 +                           struct iwl_rxon_context *ctx)
  {
 -      struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
 +      struct iwl_rxon_cmd *rxon = &ctx->staging;
  
 -      if (!ht_conf->is_ht) {
 +      if (!ctx->ht.enabled) {
                rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
                        RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
                        RXON_FLG_HT40_PROT_MSK |
                return;
        }
  
 -      /* FIXME: if the definition of ht_protection changed, the "translation"
 +      /* FIXME: if the definition of ht.protection changes, a "translation"
         * will be needed for rxon->flags
         */
 -      rxon->flags |= cpu_to_le32(ht_conf->ht_protection << RXON_FLG_HT_OPERATING_MODE_POS);
 +      rxon->flags |= cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);
  
        /* Set up channel bandwidth:
         * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
        /* clear the HT channel mode before set the mode */
        rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
                         RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
 -      if (iwl_is_ht40_tx_allowed(priv, NULL)) {
 +      if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) {
                /* pure ht40 */
 -              if (ht_conf->ht_protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
 +              if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
                        rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
                        /* Note: control channel is opposite of extension channel */
 -                      switch (ht_conf->extension_chan_offset) {
 +                      switch (ctx->ht.extension_chan_offset) {
                        case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
                                rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
                                break;
                        }
                } else {
                        /* Note: control channel is opposite of extension channel */
 -                      switch (ht_conf->extension_chan_offset) {
 +                      switch (ctx->ht.extension_chan_offset) {
                        case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
                                rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
                                rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
        }
  
        if (priv->cfg->ops->hcmd->set_rxon_chain)
 -              priv->cfg->ops->hcmd->set_rxon_chain(priv);
 +              priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
  
        IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
                        "extension channel offset 0x%x\n",
 -                      le32_to_cpu(rxon->flags), ht_conf->ht_protection,
 -                      ht_conf->extension_chan_offset);
 +                      le32_to_cpu(rxon->flags), ctx->ht.protection,
 +                      ctx->ht.extension_chan_offset);
 +}
 +
 +void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
 +{
 +      struct iwl_rxon_context *ctx;
 +
 +      for_each_context(priv, ctx)
 +              _iwl_set_rxon_ht(priv, ht_conf, ctx);
  }
  EXPORT_SYMBOL(iwl_set_rxon_ht);
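
iwl_set_rxon_ht() keeps its old signature but is now a thin wrapper that fans the HT configuration out to every RXON context. The for_each_context() iterator itself is not shown in this hunk; a hedged sketch of the fixed-array iteration idiom such a macro typically implements (struct layout and bounds here are assumptions, not the driver's):

#define NUM_CTX 2

struct rxon_context { int ht_enabled; };
struct device_priv { struct rxon_context contexts[NUM_CTX]; };

/* Walk every context slot in the fixed array. */
#define for_each_context(priv, ctx)                        \
	for ((ctx) = &(priv)->contexts[0];                 \
	     (ctx) < &(priv)->contexts[NUM_CTX]; (ctx)++)

static void apply_to_all(struct device_priv *priv)
{
	struct rxon_context *ctx;

	for_each_context(priv, ctx)
		ctx->ht_enabled = 1;   /* stand-in for _iwl_set_rxon_ht() */
}
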
  
   */
  static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
  {
 +      if (priv->cfg->advanced_bt_coexist && (priv->bt_full_concurrent ||
 +          priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
 +              /*
 +               * only use chain 'A' in bt high traffic load or
 +               * full concurrency mode
 +               */
 +              return IWL_NUM_RX_CHAINS_SINGLE;
 +      }
        /* # of Rx chains to use when expecting MIMO. */
        if (is_single_rx_stream(priv))
                return IWL_NUM_RX_CHAINS_SINGLE;
@@@ -902,7 -819,7 +902,7 @@@ static u8 iwl_count_chain_bitmap(u32 ch
   * Selects how many and which Rx receivers/antennas/chains to use.
   * This should not be used for the scan command ... it puts data in the wrong place.
   */
 -void iwl_set_rxon_chain(struct iwl_priv *priv)
 +void iwl_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
  {
        bool is_single = is_single_rx_stream(priv);
        bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
         * Before first association, we assume all antennas are connected.
         * Just after first association, iwl_chain_noise_calibration()
         *    checks which antennas actually *are* connected. */
 -       if (priv->chain_noise_data.active_chains)
 +      if (priv->chain_noise_data.active_chains)
                active_chains = priv->chain_noise_data.active_chains;
        else
                active_chains = priv->hw_params.valid_rx_ant;
  
 +      if (priv->cfg->advanced_bt_coexist && (priv->bt_full_concurrent ||
 +          priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
 +              /*
 +               * only use chain 'A' in bt high traffic load or
 +               * full concurrency mode
 +               */
 +              active_chains = first_antenna(active_chains);
 +      }
 +
        rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
  
        /* How many receivers should we use? */
        rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
        rx_chain |= idle_rx_cnt  << RXON_RX_CHAIN_CNT_POS;
  
 -      priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain);
 +      ctx->staging.rx_chain = cpu_to_le16(rx_chain);
  
        if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
 -              priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
 +              ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
        else
 -              priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
 +              ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
  
        IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
 -                      priv->staging_rxon.rx_chain,
 +                      ctx->staging.rx_chain,
                        active_rx_cnt, idle_rx_cnt);
  
        WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
  }
  EXPORT_SYMBOL(iwl_set_rxon_chain);
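
iwl_set_rxon_chain() packs several small fields into the single rx_chain word at fixed bit offsets. A sketch using only the shifts visible in this hunk; the numeric positions below are illustrative assumptions, the real RXON_RX_CHAIN_* constants live in the iwlwifi headers:

#define RXON_RX_CHAIN_VALID_POS     1   /* illustrative positions */
#define RXON_RX_CHAIN_CNT_POS      10
#define RXON_RX_CHAIN_MIMO_CNT_POS 12

static unsigned short pack_rx_chain(unsigned int active_chains,
				    unsigned int active_cnt,
				    unsigned int idle_cnt)
{
	unsigned short rx_chain = 0;

	rx_chain |= active_chains << RXON_RX_CHAIN_VALID_POS; /* antenna map */
	rx_chain |= active_cnt << RXON_RX_CHAIN_MIMO_CNT_POS; /* rx count, MIMO */
	rx_chain |= idle_cnt << RXON_RX_CHAIN_CNT_POS;        /* rx count, idle */
	return rx_chain;
}
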
  
 -/* Return valid channel */
 +/* Return a valid, unused channel for a passive scan to reset the RF */
  u8 iwl_get_single_channel_number(struct iwl_priv *priv,
 -                                enum ieee80211_band band)
 +                               enum ieee80211_band band)
  {
        const struct iwl_channel_info *ch_info;
        int i;
        u8 channel = 0;
 +      u8 min, max;
 +      struct iwl_rxon_context *ctx;
  
 -      /* only scan single channel, good enough to reset the RF */
 -      /* pick the first valid not in-use channel */
        if (band == IEEE80211_BAND_5GHZ) {
 -              for (i = 14; i < priv->channel_count; i++) {
 -                      if (priv->channel_info[i].channel !=
 -                          le16_to_cpu(priv->staging_rxon.channel)) {
 -                              channel = priv->channel_info[i].channel;
 -                              ch_info = iwl_get_channel_info(priv,
 -                                      band, channel);
 -                              if (is_channel_valid(ch_info))
 -                                      break;
 -                      }
 -              }
 +              min = 14;
 +              max = priv->channel_count;
        } else {
 -              for (i = 0; i < 14; i++) {
 -                      if (priv->channel_info[i].channel !=
 -                          le16_to_cpu(priv->staging_rxon.channel)) {
 -                                      channel =
 -                                              priv->channel_info[i].channel;
 -                                      ch_info = iwl_get_channel_info(priv,
 -                                              band, channel);
 -                                      if (is_channel_valid(ch_info))
 -                                              break;
 -                      }
 +              min = 0;
 +              max = 14;
 +      }
 +
 +      for (i = min; i < max; i++) {
 +              bool busy = false;
 +
 +              for_each_context(priv, ctx) {
 +                      busy = priv->channel_info[i].channel ==
 +                              le16_to_cpu(ctx->staging.channel);
 +                      if (busy)
 +                              break;
                }
 +
 +              if (busy)
 +                      continue;
 +
 +              channel = priv->channel_info[i].channel;
 +              ch_info = iwl_get_channel_info(priv, band, channel);
 +              if (is_channel_valid(ch_info))
 +                      break;
        }
  
        return channel;
  EXPORT_SYMBOL(iwl_get_single_channel_number);
  
  /**
 - * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON
 - * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz
 - * @channel: Any channel valid for the requested phymode
 + * iwl_set_rxon_channel - Set the band and channel values in staging RXON
 + * @ch: requested channel as a pointer to struct ieee80211_channel
  
 - * In addition to setting the staging RXON, priv->phymode is also set.
 - *
   * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
 - * in the staging RXON flag structure based on the phymode
 + * in the staging RXON flag structure based on the ch->band
   */
 -int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
 +int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
 +                       struct iwl_rxon_context *ctx)
  {
        enum ieee80211_band band = ch->band;
 -      u16 channel = ieee80211_frequency_to_channel(ch->center_freq);
 -
 -      if (!iwl_get_channel_info(priv, band, channel)) {
 -              IWL_DEBUG_INFO(priv, "Could not set channel to %d [%d]\n",
 -                             channel, band);
 -              return -EINVAL;
 -      }
 +      u16 channel = ch->hw_value;
  
 -      if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
 +      if ((le16_to_cpu(ctx->staging.channel) == channel) &&
            (priv->band == band))
                return 0;
  
 -      priv->staging_rxon.channel = cpu_to_le16(channel);
 +      ctx->staging.channel = cpu_to_le16(channel);
        if (band == IEEE80211_BAND_5GHZ)
 -              priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
 +              ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
        else
 -              priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
 +              ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
  
        priv->band = band;
  
  EXPORT_SYMBOL(iwl_set_rxon_channel);
  
  void iwl_set_flags_for_band(struct iwl_priv *priv,
 +                          struct iwl_rxon_context *ctx,
                            enum ieee80211_band band,
                            struct ieee80211_vif *vif)
  {
        if (band == IEEE80211_BAND_5GHZ) {
 -              priv->staging_rxon.flags &=
 +              ctx->staging.flags &=
                    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
                      | RXON_FLG_CCK_MSK);
 -              priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
 +              ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
        } else {
                /* Copied from iwl_post_associate() */
                if (vif && vif->bss_conf.use_short_slot)
 -                      priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
 +                      ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
                else
 -                      priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
 +                      ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
  
 -              priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
 -              priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
 -              priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
 +              ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
 +              ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
 +              ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
        }
  }
  EXPORT_SYMBOL(iwl_set_flags_for_band);
   * initialize rxon structure with default values from eeprom
   */
  void iwl_connection_init_rx_config(struct iwl_priv *priv,
 -                                 struct ieee80211_vif *vif)
 +                                 struct iwl_rxon_context *ctx)
  {
        const struct iwl_channel_info *ch_info;
 -      enum nl80211_iftype type = NL80211_IFTYPE_STATION;
 -
 -      if (vif)
 -              type = vif->type;
  
 -      memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
 +      memset(&ctx->staging, 0, sizeof(ctx->staging));
  
 -      switch (type) {
 +      if (!ctx->vif) {
 +              ctx->staging.dev_type = ctx->unused_devtype;
 +      } else switch (ctx->vif->type) {
        case NL80211_IFTYPE_AP:
 -              priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
 +              ctx->staging.dev_type = ctx->ap_devtype;
                break;
  
        case NL80211_IFTYPE_STATION:
 -              priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
 -              priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
 +              ctx->staging.dev_type = ctx->station_devtype;
 +              ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
                break;
  
        case NL80211_IFTYPE_ADHOC:
 -              priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
 -              priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
 -              priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
 +              ctx->staging.dev_type = ctx->ibss_devtype;
 +              ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
 +              ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
                                                  RXON_FILTER_ACCEPT_GRP_MSK;
                break;
  
        default:
 -              IWL_ERR(priv, "Unsupported interface type %d\n", type);
 +              IWL_ERR(priv, "Unsupported interface type %d\n",
 +                      ctx->vif->type);
                break;
        }
  
        /* TODO:  Figure out when short_preamble would be set and cache from
         * that */
        if (!hw_to_local(priv->hw)->short_preamble)
 -              priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
 +              ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
        else
 -              priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
 +              ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
  #endif
  
        ch_info = iwl_get_channel_info(priv, priv->band,
 -                                     le16_to_cpu(priv->active_rxon.channel));
 +                                     le16_to_cpu(ctx->active.channel));
  
        if (!ch_info)
                ch_info = &priv->channel_info[0];
  
 -      priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
 +      ctx->staging.channel = cpu_to_le16(ch_info->channel);
        priv->band = ch_info->band;
  
 -      iwl_set_flags_for_band(priv, priv->band, vif);
 +      iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
  
 -      priv->staging_rxon.ofdm_basic_rates =
 +      ctx->staging.ofdm_basic_rates =
            (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
 -      priv->staging_rxon.cck_basic_rates =
 +      ctx->staging.cck_basic_rates =
            (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
  
        /* clear both MIX and PURE40 mode flag */
 -      priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
 +      ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
                                        RXON_FLG_CHANNEL_MODE_PURE_40);
 +      if (ctx->vif)
 +              memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
  
 -      if (vif)
 -              memcpy(priv->staging_rxon.node_addr, vif->addr, ETH_ALEN);
 -
 -      priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
 -      priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
 -      priv->staging_rxon.ofdm_ht_triple_stream_basic_rates = 0xff;
 +      ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
 +      ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
 +      ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
  }
  EXPORT_SYMBOL(iwl_connection_init_rx_config);
  
@@@ -1136,7 -1051,6 +1136,7 @@@ void iwl_set_rate(struct iwl_priv *priv
  {
        const struct ieee80211_supported_band *hw = NULL;
        struct ieee80211_rate *rate;
 +      struct iwl_rxon_context *ctx;
        int i;
  
        hw = iwl_get_hw_mode(priv, priv->band);
  
        IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
  
 -      priv->staging_rxon.cck_basic_rates =
 -          (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
 +      for_each_context(priv, ctx) {
 +              ctx->staging.cck_basic_rates =
 +                  (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
  
 -      priv->staging_rxon.ofdm_basic_rates =
 -         (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
 +              ctx->staging.ofdm_basic_rates =
 +                 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
 +      }
  }
  EXPORT_SYMBOL(iwl_set_rate);
  
  void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
  {
 +      /*
 +       * MULTI-FIXME
 +       * See iwl_mac_channel_switch.
 +       */
 +      struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 +
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;
  
        if (priv->switch_rxon.switch_in_progress) {
 -              ieee80211_chswitch_done(priv->vif, is_success);
 +              ieee80211_chswitch_done(ctx->vif, is_success);
                mutex_lock(&priv->mutex);
                priv->switch_rxon.switch_in_progress = false;
                mutex_unlock(&priv->mutex);
@@@ -1188,19 -1094,14 +1188,19 @@@ EXPORT_SYMBOL(iwl_chswitch_done)
  void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
  {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
 -      struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
        struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
 +      /*
 +       * MULTI-FIXME
 +       * See iwl_mac_channel_switch.
 +       */
 +      struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 +      struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
  
        if (priv->switch_rxon.switch_in_progress) {
                if (!le32_to_cpu(csa->status) &&
                    (csa->channel == priv->switch_rxon.channel)) {
                        rxon->channel = csa->channel;
 -                      priv->staging_rxon.channel = csa->channel;
 +                      ctx->staging.channel = csa->channel;
                        IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
                              le16_to_cpu(csa->channel));
                        iwl_chswitch_done(priv, true);
  EXPORT_SYMBOL(iwl_rx_csa);
  
  #ifdef CONFIG_IWLWIFI_DEBUG
 -void iwl_print_rx_config_cmd(struct iwl_priv *priv)
 +void iwl_print_rx_config_cmd(struct iwl_priv *priv,
 +                           struct iwl_rxon_context *ctx)
  {
 -      struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
 +      struct iwl_rxon_cmd *rxon = &ctx->staging;
  
        IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
        iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
@@@ -1257,8 -1157,7 +1257,8 @@@ void iwl_irq_handle_error(struct iwl_pr
        priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
  #ifdef CONFIG_IWLWIFI_DEBUG
        if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
 -              iwl_print_rx_config_cmd(priv);
 +              iwl_print_rx_config_cmd(priv,
 +                                      &priv->contexts[IWL_RXON_CTX_BSS]);
  #endif
  
        wake_up_interruptible(&priv->wait_command_queue);
@@@ -1429,6 -1328,25 +1429,6 @@@ out
  EXPORT_SYMBOL(iwl_apm_init);
  
  
 -int iwl_set_hw_params(struct iwl_priv *priv)
 -{
 -      priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
 -      priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
 -      if (priv->cfg->mod_params->amsdu_size_8K)
 -              priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
 -      else
 -              priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
 -
 -      priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
 -
 -      if (priv->cfg->mod_params->disable_11n)
 -              priv->cfg->sku &= ~IWL_SKU_N;
 -
 -      /* Device-specific setup */
 -      return priv->cfg->ops->lib->set_hw_params(priv);
 -}
 -EXPORT_SYMBOL(iwl_set_hw_params);
 -
  int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
  {
        int ret = 0;
@@@ -1578,6 -1496,76 +1578,6 @@@ int iwl_send_statistics_request(struct 
  }
  EXPORT_SYMBOL(iwl_send_statistics_request);
  
 -void iwl_rf_kill_ct_config(struct iwl_priv *priv)
 -{
 -      struct iwl_ct_kill_config cmd;
 -      struct iwl_ct_kill_throttling_config adv_cmd;
 -      unsigned long flags;
 -      int ret = 0;
 -
 -      spin_lock_irqsave(&priv->lock, flags);
 -      iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
 -                  CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
 -      spin_unlock_irqrestore(&priv->lock, flags);
 -      priv->thermal_throttle.ct_kill_toggle = false;
 -
 -      if (priv->cfg->support_ct_kill_exit) {
 -              adv_cmd.critical_temperature_enter =
 -                      cpu_to_le32(priv->hw_params.ct_kill_threshold);
 -              adv_cmd.critical_temperature_exit =
 -                      cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);
 -
 -              ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
 -                                     sizeof(adv_cmd), &adv_cmd);
 -              if (ret)
 -                      IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
 -              else
 -                      IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
 -                                      "succeeded, "
 -                                      "critical temperature enter is %d,"
 -                                      "exit is %d\n",
 -                                     priv->hw_params.ct_kill_threshold,
 -                                     priv->hw_params.ct_kill_exit_threshold);
 -      } else {
 -              cmd.critical_temperature_R =
 -                      cpu_to_le32(priv->hw_params.ct_kill_threshold);
 -
 -              ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
 -                                     sizeof(cmd), &cmd);
 -              if (ret)
 -                      IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
 -              else
 -                      IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
 -                                      "succeeded, "
 -                                      "critical temperature is %d\n",
 -                                      priv->hw_params.ct_kill_threshold);
 -      }
 -}
 -EXPORT_SYMBOL(iwl_rf_kill_ct_config);
 -
 -
 -/*
 - * CARD_STATE_CMD
 - *
 - * Use: Sets the device's internal card state to enable, disable, or halt
 - *
 - * When in the 'enable' state the card operates as normal.
 - * When in the 'disable' state, the card enters into a low power mode.
 - * When in the 'halt' state, the card is shut down and must be fully
 - * restarted to come back on.
 - */
 -int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
 -{
 -      struct iwl_host_cmd cmd = {
 -              .id = REPLY_CARD_STATE_CMD,
 -              .len = sizeof(u32),
 -              .data = &flags,
 -              .flags = meta_flag,
 -      };
 -
 -      return iwl_send_cmd(priv, &cmd);
 -}
 -
  void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
                           struct iwl_rx_mem_buffer *rxb)
  {
@@@ -1626,7 -1614,6 +1626,7 @@@ int iwl_mac_conf_tx(struct ieee80211_h
                           const struct ieee80211_tx_queue_params *params)
  {
        struct iwl_priv *priv = hw->priv;
 +      struct iwl_rxon_context *ctx;
        unsigned long flags;
        int q;
  
  
        spin_lock_irqsave(&priv->lock, flags);
  
 -      priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
 -      priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
 -      priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
 -      priv->qos_data.def_qos_parm.ac[q].edca_txop =
 -                      cpu_to_le16((params->txop * 32));
 +      /*
 +       * MULTI-FIXME
 +       * This may need to be done per interface in nl80211/cfg80211/mac80211.
 +       */
 +      for_each_context(priv, ctx) {
 +              ctx->qos_data.def_qos_parm.ac[q].cw_min =
 +                      cpu_to_le16(params->cw_min);
 +              ctx->qos_data.def_qos_parm.ac[q].cw_max =
 +                      cpu_to_le16(params->cw_max);
 +              ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
 +              ctx->qos_data.def_qos_parm.ac[q].edca_txop =
 +                              cpu_to_le16((params->txop * 32));
  
 -      priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
 +              ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
 +      }
  
        spin_unlock_irqrestore(&priv->lock, flags);
  
  }
  EXPORT_SYMBOL(iwl_mac_conf_tx);
  
 +int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
 +{
 +      struct iwl_priv *priv = hw->priv;
 +
 +      return priv->ibss_manager == IWL_IBSS_MANAGER;
 +}
 +EXPORT_SYMBOL_GPL(iwl_mac_tx_last_beacon);
 +
  static void iwl_ht_conf(struct iwl_priv *priv,
                        struct ieee80211_vif *vif)
  {
        struct iwl_ht_config *ht_conf = &priv->current_ht_config;
        struct ieee80211_sta *sta;
        struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
 +      struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
  
        IWL_DEBUG_MAC80211(priv, "enter:\n");
  
 -      if (!ht_conf->is_ht)
 +      if (!ctx->ht.enabled)
                return;
  
 -      ht_conf->ht_protection =
 +      ctx->ht.protection =
                bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
 -      ht_conf->non_GF_STA_present =
 +      ctx->ht.non_gf_sta_present =
                !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
  
        ht_conf->single_chain_sufficient = false;
        IWL_DEBUG_MAC80211(priv, "leave\n");
  }
  
 -static inline void iwl_set_no_assoc(struct iwl_priv *priv)
 +static inline void iwl_set_no_assoc(struct iwl_priv *priv,
 +                                  struct ieee80211_vif *vif)
  {
 +      struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
 +
        iwl_led_disassociate(priv);
        /*
         * inform the ucode that there is no longer an
         * association and that no more packets should be
         * sent
         */
 -      priv->staging_rxon.filter_flags &=
 -              ~RXON_FILTER_ASSOC_MSK;
 -      priv->staging_rxon.assoc_id = 0;
 -      iwlcore_commit_rxon(priv);
 +      ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
 +      ctx->staging.assoc_id = 0;
 +      iwlcore_commit_rxon(priv, ctx);
  }
  
  static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
  
        IWL_DEBUG_MAC80211(priv, "enter\n");
  
 +      lockdep_assert_held(&priv->mutex);
 +
 +      if (!priv->beacon_ctx) {
 +              IWL_ERR(priv, "update beacon but no beacon context!\n");
 +              dev_kfree_skb(skb);
 +              return -EINVAL;
 +      }
 +
        if (!iwl_is_ready_rf(priv)) {
                IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
                return -EIO;
        IWL_DEBUG_MAC80211(priv, "leave\n");
        spin_unlock_irqrestore(&priv->lock, flags);
  
 -      priv->cfg->ops->lib->post_associate(priv, priv->vif);
 +      priv->cfg->ops->lib->post_associate(priv, priv->beacon_ctx->vif);
  
        return 0;
  }
@@@ -1797,7 -1757,6 +1797,7 @@@ void iwl_bss_info_changed(struct ieee80
                          u32 changes)
  {
        struct iwl_priv *priv = hw->priv;
 +      struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
        int ret;
  
        IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
                unsigned long flags;
  
                spin_lock_irqsave(&priv->lock, flags);
 -              priv->qos_data.qos_active = bss_conf->qos;
 -              iwl_update_qos(priv);
 +              ctx->qos_data.qos_active = bss_conf->qos;
 +              iwl_update_qos(priv, ctx);
                spin_unlock_irqrestore(&priv->lock, flags);
        }
  
 +      if (changes & BSS_CHANGED_BEACON_ENABLED) {
 +              /*
 +               * the add_interface code must make sure we only ever
 +               * have a single interface that could be beaconing at
 +               * any time.
 +               */
 +              if (vif->bss_conf.enable_beacon)
 +                      priv->beacon_ctx = ctx;
 +              else
 +                      priv->beacon_ctx = NULL;
 +      }
 +
        if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
                dev_kfree_skb(priv->ibss_beacon);
                priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
        }
  
 -      if (changes & BSS_CHANGED_BEACON_INT) {
 -              /* TODO: in AP mode, do something to make this take effect */
 -      }
 +      if (changes & BSS_CHANGED_BEACON_INT && vif->type == NL80211_IFTYPE_AP)
 +              iwl_send_rxon_timing(priv, ctx);
  
        if (changes & BSS_CHANGED_BSSID) {
                IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
  
                /* mac80211 only sets assoc when in STATION mode */
                if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
 -                      memcpy(priv->staging_rxon.bssid_addr,
 +                      memcpy(ctx->staging.bssid_addr,
                               bss_conf->bssid, ETH_ALEN);
  
                        /* currently needed in a few places */
                        memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
                } else {
 -                      priv->staging_rxon.filter_flags &=
 +                      ctx->staging.filter_flags &=
                                ~RXON_FILTER_ASSOC_MSK;
                }
  
                IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
                                   bss_conf->use_short_preamble);
                if (bss_conf->use_short_preamble)
 -                      priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
 +                      ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
                else
 -                      priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
 +                      ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
        }
  
        if (changes & BSS_CHANGED_ERP_CTS_PROT) {
                IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
                if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
 -                      priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
 +                      ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
                else
 -                      priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
 +                      ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
                if (bss_conf->use_cts_prot)
 -                      priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
 +                      ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
                else
 -                      priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
 +                      ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
        }
  
        if (changes & BSS_CHANGED_BASIC_RATES) {
                 * like this here:
                 *
                if (A-band)
 -                      priv->staging_rxon.ofdm_basic_rates =
 +                      ctx->staging.ofdm_basic_rates =
                                bss_conf->basic_rates;
                else
 -                      priv->staging_rxon.ofdm_basic_rates =
 +                      ctx->staging.ofdm_basic_rates =
                                bss_conf->basic_rates >> 4;
 -                      priv->staging_rxon.cck_basic_rates =
 +                      ctx->staging.cck_basic_rates =
                                bss_conf->basic_rates & 0xF;
                 */
        }
                iwl_ht_conf(priv, vif);
  
                if (priv->cfg->ops->hcmd->set_rxon_chain)
 -                      priv->cfg->ops->hcmd->set_rxon_chain(priv);
 +                      priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
        }
  
        if (changes & BSS_CHANGED_ASSOC) {
                        if (!iwl_is_rfkill(priv))
                                priv->cfg->ops->lib->post_associate(priv, vif);
                } else
 -                      iwl_set_no_assoc(priv);
 +                      iwl_set_no_assoc(priv, vif);
        }
  
 -      if (changes && iwl_is_associated(priv) && bss_conf->aid) {
 +      if (changes && iwl_is_associated_ctx(ctx) && bss_conf->aid) {
                IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
                                   changes);
 -              ret = iwl_send_rxon_assoc(priv);
 +              ret = iwl_send_rxon_assoc(priv, ctx);
                if (!ret) {
                        /* Sync active_rxon with latest change. */
 -                      memcpy((void *)&priv->active_rxon,
 -                              &priv->staging_rxon,
 +                      memcpy((void *)&ctx->active,
 +                              &ctx->staging,
                                sizeof(struct iwl_rxon_cmd));
                }
        }
  
        if (changes & BSS_CHANGED_BEACON_ENABLED) {
                if (vif->bss_conf.enable_beacon) {
 -                      memcpy(priv->staging_rxon.bssid_addr,
 +                      memcpy(ctx->staging.bssid_addr,
                               bss_conf->bssid, ETH_ALEN);
                        memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
                        iwlcore_config_ap(priv, vif);
                } else
 -                      iwl_set_no_assoc(priv);
 +                      iwl_set_no_assoc(priv, vif);
        }
  
        if (changes & BSS_CHANGED_IBSS) {
                                bss_conf->bssid);
        }
  
 +      if (changes & BSS_CHANGED_IDLE &&
 +          priv->cfg->ops->hcmd->set_pan_params) {
 +              if (priv->cfg->ops->hcmd->set_pan_params(priv))
 +                      IWL_ERR(priv, "failed to update PAN params\n");
 +      }
 +
        mutex_unlock(&priv->mutex);
  
        IWL_DEBUG_MAC80211(priv, "leave\n");
@@@ -1981,21 -1923,17 +1981,21 @@@ EXPORT_SYMBOL(iwl_bss_info_changed)
  
  static int iwl_set_mode(struct iwl_priv *priv, struct ieee80211_vif *vif)
  {
 -      iwl_connection_init_rx_config(priv, vif);
 +      struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
 +
 +      iwl_connection_init_rx_config(priv, ctx);
  
        if (priv->cfg->ops->hcmd->set_rxon_chain)
 -              priv->cfg->ops->hcmd->set_rxon_chain(priv);
 +              priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
  
 -      return iwlcore_commit_rxon(priv);
 +      return iwlcore_commit_rxon(priv, ctx);
  }
  
  int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
  {
        struct iwl_priv *priv = hw->priv;
 +      struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
 +      struct iwl_rxon_context *tmp, *ctx = NULL;
        int err = 0;
  
        IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
                goto out;
        }
  
 -      if (priv->vif) {
 -              IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
 +      for_each_context(priv, tmp) {
 +              u32 possible_modes =
 +                      tmp->interface_modes | tmp->exclusive_interface_modes;
 +
 +              if (tmp->vif) {
 +                      /* check if this busy context is exclusive */
 +                      if (tmp->exclusive_interface_modes &
 +                                              BIT(tmp->vif->type)) {
 +                              err = -EINVAL;
 +                              goto out;
 +                      }
 +                      continue;
 +              }
 +
 +              if (!(possible_modes & BIT(vif->type)))
 +                      continue;
 +
 +              /* found a possibly usable context without an interface */
 +              ctx = tmp;
 +              break;
 +      }
 +
 +      if (!ctx) {
                err = -EOPNOTSUPP;
                goto out;
        }
  
 -      priv->vif = vif;
 +      vif_priv->ctx = ctx;
 +      ctx->vif = vif;
 +      /*
 +       * This variable will be correct only when there's just
 +       * a single context, but all code using it is for hardware
 +       * that supports only one context.
 +       */
        priv->iw_mode = vif->type;
  
 +      ctx->is_active = true;
 +
        err = iwl_set_mode(priv, vif);
 -      if (err)
 +      if (err) {
 +              if (!ctx->always_active)
 +                      ctx->is_active = false;
                goto out_err;
 +      }
 +
 +      if (priv->cfg->advanced_bt_coexist &&
 +          vif->type == NL80211_IFTYPE_ADHOC) {
 +              /*
 +               * pretend to have high BT traffic as long as we
 +               * are operating in IBSS mode, as this will cause
 +               * the rate scaling etc. to behave as intended.
 +               */
 +              priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
 +      }
  
        goto out;
  
   out_err:
 -      priv->vif = NULL;
 +      ctx->vif = NULL;
        priv->iw_mode = NL80211_IFTYPE_STATION;
   out:
        mutex_unlock(&priv->mutex);
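
The context-selection loop in iwl_mac_add_interface() treats each context's usable interface types as a bitmap, so both tests -- "is this occupied context exclusive?" and "can this free context host vif->type?" -- reduce to single mask operations. A standalone sketch of the idiom (struct and field names are illustrative, not the driver's):

#include <stdint.h>

#define BIT(n) (1u << (n))

struct ctx_caps {
	uint32_t modes;           /* interface types this context can share */
	uint32_t exclusive_modes; /* types that claim the context exclusively */
	int busy;                 /* already hosts an interface */
};

/* Returns 1 if a new interface of 'iftype' may be placed on 'c'. */
static int ctx_can_host(const struct ctx_caps *c, unsigned int iftype)
{
	if (c->busy)
		return 0;  /* the caller separately rejects exclusive owners */
	return !!((c->modes | c->exclusive_modes) & BIT(iftype));
}
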
@@@ -2080,36 -1976,30 +2080,36 @@@ void iwl_mac_remove_interface(struct ie
                              struct ieee80211_vif *vif)
  {
        struct iwl_priv *priv = hw->priv;
 -      bool scan_completed = false;
 +      struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
  
        IWL_DEBUG_MAC80211(priv, "enter\n");
  
        mutex_lock(&priv->mutex);
  
 -      if (iwl_is_ready_rf(priv)) {
 -              iwl_scan_cancel_timeout(priv, 100);
 -              priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
 -              iwlcore_commit_rxon(priv);
 -      }
 -      if (priv->vif == vif) {
 -              priv->vif = NULL;
 -              if (priv->scan_vif == vif) {
 -                      scan_completed = true;
 -                      priv->scan_vif = NULL;
 -                      priv->scan_request = NULL;
 -              }
 -              memset(priv->bssid, 0, ETH_ALEN);
 +      WARN_ON(ctx->vif != vif);
 +      ctx->vif = NULL;
 +
 +      if (priv->scan_vif == vif) {
 +              iwl_scan_cancel_timeout(priv, 200);
 +              iwl_force_scan_end(priv);
        }
 -      mutex_unlock(&priv->mutex);
 +      iwl_set_mode(priv, vif);
 +
 +      if (!ctx->always_active)
 +              ctx->is_active = false;
 +
 +      /*
 +       * When removing the IBSS interface, overwrite the
 +       * BT traffic load with the stored one from the last
 +       * notification, if any. On devices that don't
 +       * implement this it has no effect, since both
 +       * values are the same and zero.
 +       */
 +      if (vif->type == NL80211_IFTYPE_ADHOC)
 +              priv->bt_traffic_load = priv->notif_bt_traffic_load;
  
 -      if (scan_completed)
 -              ieee80211_scan_completed(priv->hw, true);
 +      memset(priv->bssid, 0, ETH_ALEN);
 +      mutex_unlock(&priv->mutex);
  
        IWL_DEBUG_MAC80211(priv, "leave\n");
  
@@@ -2124,9 -2014,7 +2124,9 @@@ int iwl_mac_config(struct ieee80211_hw 
        struct iwl_priv *priv = hw->priv;
        const struct iwl_channel_info *ch_info;
        struct ieee80211_conf *conf = &hw->conf;
 +      struct ieee80211_channel *channel = conf->channel;
        struct iwl_ht_config *ht_conf = &priv->current_ht_config;
 +      struct iwl_rxon_context *ctx;
        unsigned long flags = 0;
        int ret = 0;
        u16 ch;
        mutex_lock(&priv->mutex);
  
        IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
 -                                      conf->channel->hw_value, changed);
 +                                      channel->hw_value, changed);
  
        if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
                        test_bit(STATUS_SCANNING, &priv->status))) {
                 * configured.
                 */
                if (priv->cfg->ops->hcmd->set_rxon_chain)
 -                      priv->cfg->ops->hcmd->set_rxon_chain(priv);
 +                      for_each_context(priv, ctx)
 +                              priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
        }
  
        /* during scanning mac80211 will delay channel setting until
                if (scan_active)
                        goto set_ch_out;
  
 -              ch = ieee80211_frequency_to_channel(conf->channel->center_freq);
 -              ch_info = iwl_get_channel_info(priv, conf->channel->band, ch);
 +              ch = channel->hw_value;
 +              ch_info = iwl_get_channel_info(priv, channel->band, ch);
                if (!is_channel_valid(ch_info)) {
                        IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
                        ret = -EINVAL;
  
                spin_lock_irqsave(&priv->lock, flags);
  
 -              /* Configure HT40 channels */
 -              ht_conf->is_ht = conf_is_ht(conf);
 -              if (ht_conf->is_ht) {
 -                      if (conf_is_ht40_minus(conf)) {
 -                              ht_conf->extension_chan_offset =
 -                                      IEEE80211_HT_PARAM_CHA_SEC_BELOW;
 -                              ht_conf->is_40mhz = true;
 -                      } else if (conf_is_ht40_plus(conf)) {
 -                              ht_conf->extension_chan_offset =
 -                                      IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
 -                              ht_conf->is_40mhz = true;
 -                      } else {
 -                              ht_conf->extension_chan_offset =
 -                                      IEEE80211_HT_PARAM_CHA_SEC_NONE;
 -                              ht_conf->is_40mhz = false;
 -                      }
 -              } else
 -                      ht_conf->is_40mhz = false;
 -              /* Default to no protection. Protection mode will later be set
 -               * from BSS config in iwl_ht_conf */
 -              ht_conf->ht_protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
 +              for_each_context(priv, ctx) {
 +                      /* Configure HT40 channels */
 +                      ctx->ht.enabled = conf_is_ht(conf);
 +                      if (ctx->ht.enabled) {
 +                              if (conf_is_ht40_minus(conf)) {
 +                                      ctx->ht.extension_chan_offset =
 +                                              IEEE80211_HT_PARAM_CHA_SEC_BELOW;
 +                                      ctx->ht.is_40mhz = true;
 +                              } else if (conf_is_ht40_plus(conf)) {
 +                                      ctx->ht.extension_chan_offset =
 +                                              IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
 +                                      ctx->ht.is_40mhz = true;
 +                              } else {
 +                                      ctx->ht.extension_chan_offset =
 +                                              IEEE80211_HT_PARAM_CHA_SEC_NONE;
 +                                      ctx->ht.is_40mhz = false;
 +                              }
 +                      } else
 +                              ctx->ht.is_40mhz = false;
  
 -              /* if we are switching from ht to 2.4 clear flags
 -               * from any ht related info since 2.4 does not
 -               * support ht */
 -              if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
 -                      priv->staging_rxon.flags = 0;
 +                      /*
 +                       * Default to no protection. Protection mode will
 +                       * later be set from BSS config in iwl_ht_conf
 +                       */
 +                      ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
 +
 +                      /* if we are switching from HT to 2.4 GHz, clear
 +                       * any HT-related flags since 2.4 GHz does not
 +                       * support HT */
 +                      if ((le16_to_cpu(ctx->staging.channel) != ch))
 +                              ctx->staging.flags = 0;
  
 -              iwl_set_rxon_channel(priv, conf->channel);
 -              iwl_set_rxon_ht(priv, ht_conf);
 +                      iwl_set_rxon_channel(priv, channel, ctx);
 +                      iwl_set_rxon_ht(priv, ht_conf);
 +
 +                      iwl_set_flags_for_band(priv, ctx, channel->band,
 +                                             ctx->vif);
 +              }
  
 -              iwl_set_flags_for_band(priv, conf->channel->band, priv->vif);
                spin_unlock_irqrestore(&priv->lock, flags);
  
 -              if (priv->cfg->ops->lib->update_bcast_station)
 -                      ret = priv->cfg->ops->lib->update_bcast_station(priv);
 +              if (priv->cfg->ops->lib->update_bcast_stations)
 +                      ret = priv->cfg->ops->lib->update_bcast_stations(priv);
  
   set_ch_out:
                /* The list of supported rates and rate mask can be different
        if (scan_active)
                goto out;
  
 -      if (memcmp(&priv->active_rxon,
 -                 &priv->staging_rxon, sizeof(priv->staging_rxon)))
 -              iwlcore_commit_rxon(priv);
 -      else
 -              IWL_DEBUG_INFO(priv, "Not re-sending same RXON configuration.\n");
 -
 +      for_each_context(priv, ctx) {
 +              if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
 +                      iwlcore_commit_rxon(priv, ctx);
 +              else
 +                      IWL_DEBUG_INFO(priv,
 +                              "Not re-sending same RXON configuration.\n");
 +      }
  
  out:
        IWL_DEBUG_MAC80211(priv, "leave\n");
@@@ -2269,8 -2148,6 +2269,8 @@@ void iwl_mac_reset_tsf(struct ieee80211
  {
        struct iwl_priv *priv = hw->priv;
        unsigned long flags;
 +      /* IBSS can only be the IWL_RXON_CTX_BSS context */
 +      struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
  
        mutex_lock(&priv->mutex);
        IWL_DEBUG_MAC80211(priv, "enter\n");
  
        spin_unlock_irqrestore(&priv->lock, flags);
  
 +      iwl_scan_cancel_timeout(priv, 100);
        if (!iwl_is_ready_rf(priv)) {
                IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
                mutex_unlock(&priv->mutex);
        /* we are restarting association process
         * clear RXON_FILTER_ASSOC_MSK bit
         */
 -      iwl_scan_cancel_timeout(priv, 100);
 -      priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
 -      iwlcore_commit_rxon(priv);
 +      ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
 +      iwlcore_commit_rxon(priv, ctx);
  
        iwl_set_rate(priv);
  
@@@ -2711,7 -2588,7 +2711,7 @@@ static void iwl_force_rf_reset(struct i
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;
  
 -      if (!iwl_is_associated(priv)) {
 +      if (!iwl_is_any_associated(priv)) {
                IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
                return;
        }
@@@ -2736,6 -2613,11 +2736,11 @@@ int iwl_force_reset(struct iwl_priv *pr
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return -EINVAL;
  
+       if (test_bit(STATUS_SCANNING, &priv->status)) {
+               IWL_DEBUG_INFO(priv, "scan in progress.\n");
+               return -EINVAL;
+       }
        if (mode >= IWL_MAX_FORCE_RESET) {
                IWL_DEBUG_INFO(priv, "invalid reset request.\n");
                return -EINVAL;
@@@ -2837,14 -2719,10 +2842,14 @@@ static int iwl_check_stuck_queue(struc
                                                "queue %d, not read %d time\n",
                                                q->id,
                                                q->repeat_same_read_ptr);
 -                              mod_timer(&priv->monitor_recover, jiffies +
 -                                      msecs_to_jiffies(IWL_ONE_HUNDRED_MSECS));
 +                              if (!priv->cfg->advanced_bt_coexist) {
 +                                      mod_timer(&priv->monitor_recover,
 +                                              jiffies + msecs_to_jiffies(
 +                                              IWL_ONE_HUNDRED_MSECS));
 +                                      return 1;
 +                              }
                        }
 -                      return 1;
 +                      return 0;
                } else {
                        q->last_read_ptr = q->read_ptr;
                        q->repeat_same_read_ptr = 0;
@@@ -2862,27 -2740,25 +2867,27 @@@ void iwl_bg_monitor_recover(unsigned lo
                return;
  
        /* monitor and check for stuck cmd queue */
 -      if (iwl_check_stuck_queue(priv, IWL_CMD_QUEUE_NUM))
 +      if (iwl_check_stuck_queue(priv, priv->cmd_queue))
                return;
  
        /* monitor and check for other stuck queues */
 -      if (iwl_is_associated(priv)) {
 +      if (iwl_is_any_associated(priv)) {
                for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
                        /* skip as we already checked the command queue */
 -                      if (cnt == IWL_CMD_QUEUE_NUM)
 +                      if (cnt == priv->cmd_queue)
                                continue;
                        if (iwl_check_stuck_queue(priv, cnt))
                                return;
                }
        }
 -      /*
 -       * Reschedule the timer to occur in
 -       * priv->cfg->monitor_recover_period
 -       */
 -      mod_timer(&priv->monitor_recover,
 -              jiffies + msecs_to_jiffies(priv->cfg->monitor_recover_period));
 +      if (priv->cfg->monitor_recover_period) {
 +              /*
 +               * Reschedule the timer to occur in
 +               * priv->cfg->monitor_recover_period
 +               */
 +              mod_timer(&priv->monitor_recover, jiffies + msecs_to_jiffies(
 +                        priv->cfg->monitor_recover_period));
 +      }
  }
  EXPORT_SYMBOL(iwl_bg_monitor_recover);
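
iwl_bg_monitor_recover() now re-arms itself only when the configuration supplies a recovery period, so a zero monitor_recover_period disables the watchdog after one pass. A minimal sketch of that self-re-arming timer idiom, using the old unsigned-long timer callback style seen here (my_dev and its fields are illustrative):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_dev {
	struct timer_list watchdog;
	unsigned int period_ms;    /* 0 disables re-arming */
};

static void my_watchdog_fn(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *)data;

	/* ... check for stuck queues here ... */

	if (dev->period_ms)
		mod_timer(&dev->watchdog,
			  jiffies + msecs_to_jiffies(dev->period_ms));
}
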
  
diff --combined include/net/addrconf.h
index 7d178a758acff984e16cb71562c979a602c8459d,4d40c4d0230baeffbafcb57cb6fac99a1f0174b3..958d2749b7a999bae6ea33b9a0cabc44b9042e94
@@@ -121,6 -121,7 +121,7 @@@ static inline int addrconf_finite_timeo
   *    IPv6 Address Label subsystem (addrlabel.c)
   */
  extern int                    ipv6_addr_label_init(void);
+ extern void                   ipv6_addr_label_cleanup(void);
  extern void                   ipv6_addr_label_rtnl_register(void);
  extern u32                    ipv6_addr_label(struct net *net,
                                                const struct in6_addr *addr,
@@@ -174,32 -175,20 +175,32 @@@ extern int ipv6_chk_acast_addr(struct n
  extern int register_inet6addr_notifier(struct notifier_block *nb);
  extern int unregister_inet6addr_notifier(struct notifier_block *nb);
  
 -static inline struct inet6_dev *
 -__in6_dev_get(struct net_device *dev)
 +/**
 + * __in6_dev_get - get inet6_dev pointer from netdevice
 + * @dev: network device
 + *
 + * Caller must hold rcu_read_lock or RTNL, because this function
 + * does not take a reference on the inet6_dev.
 + */
 +static inline struct inet6_dev *__in6_dev_get(const struct net_device *dev)
  {
 -      return rcu_dereference_check(dev->ip6_ptr,
 -                                   rcu_read_lock_held() ||
 -                                   lockdep_rtnl_is_held());
 +      return rcu_dereference_rtnl(dev->ip6_ptr);
  }
  
 -static inline struct inet6_dev *
 -in6_dev_get(struct net_device *dev)
 +/**
 + * in6_dev_get - get inet6_dev pointer from netdevice
 + * @dev: network device
 + *
 + * This version can be used in any context, and takes a reference
 + * on the inet6_dev. Callers must use in6_dev_put() later to
 + * release this reference.
 + */
 +static inline struct inet6_dev *in6_dev_get(const struct net_device *dev)
  {
 -      struct inet6_dev *idev = NULL;
 +      struct inet6_dev *idev;
 +
        rcu_read_lock();
 -      idev = __in6_dev_get(dev);
 +      idev = rcu_dereference(dev->ip6_ptr);
        if (idev)
                atomic_inc(&idev->refcnt);
        rcu_read_unlock();
  
  extern void in6_dev_finish_destroy(struct inet6_dev *idev);
  
 -static inline void
 -in6_dev_put(struct inet6_dev *idev)
 +static inline void in6_dev_put(struct inet6_dev *idev)
  {
        if (atomic_dec_and_test(&idev->refcnt))
                in6_dev_finish_destroy(idev);
  }
  
 -#define __in6_dev_put(idev)  atomic_dec(&(idev)->refcnt)
 -#define in6_dev_hold(idev)   atomic_inc(&(idev)->refcnt)
 +static inline void __in6_dev_put(struct inet6_dev *idev)
 +{
 +      atomic_dec(&idev->refcnt);
 +}
  
 +static inline void in6_dev_hold(struct inet6_dev *idev)
 +{
 +      atomic_inc(&idev->refcnt);
 +}
  
  extern void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp);
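
The two accessors now carry kernel-doc spelling out their contracts: __in6_dev_get() takes no reference and is only safe under rcu_read_lock() or RTNL, while in6_dev_get() works from any context but must be paired with in6_dev_put(). A usage sketch; the consume_* callees are placeholders:

static void consume_under_rcu(struct inet6_dev *idev) { (void)idev; }
static void consume_with_ref(struct inet6_dev *idev) { (void)idev; }

static void in6_dev_usage_sketch(struct net_device *dev)
{
	struct inet6_dev *idev;

	/* Lockless accessor: only valid inside an RCU read-side section. */
	rcu_read_lock();
	idev = __in6_dev_get(dev);
	if (idev)
		consume_under_rcu(idev);
	rcu_read_unlock();

	/* Refcounted accessor: any context, reference must be dropped. */
	idev = in6_dev_get(dev);
	if (idev) {
		consume_with_ref(idev);
		in6_dev_put(idev);
	}
}
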
  
@@@ -232,15 -216,9 +233,15 @@@ static inline void in6_ifa_put(struct i
                inet6_ifa_finish_destroy(ifp);
  }
  
 -#define __in6_ifa_put(ifp)    atomic_dec(&(ifp)->refcnt)
 -#define in6_ifa_hold(ifp)     atomic_inc(&(ifp)->refcnt)
 +static inline void __in6_ifa_put(struct inet6_ifaddr *ifp)
 +{
 +      atomic_dec(&ifp->refcnt);
 +}
  
 +static inline void in6_ifa_hold(struct inet6_ifaddr *ifp)
 +{
 +      atomic_inc(&ifp->refcnt);
 +}
  
  
  /*
@@@ -263,23 -241,23 +264,23 @@@ static inline int ipv6_addr_is_multicas
  
  static inline int ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr)
  {
 -      return (((addr->s6_addr32[0] ^ htonl(0xff020000)) |
 +      return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
                addr->s6_addr32[1] | addr->s6_addr32[2] |
 -              (addr->s6_addr32[3] ^ htonl(0x00000001))) == 0);
 +              (addr->s6_addr32[3] ^ htonl(0x00000001))) == 0;
  }
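
The rewritten return expression is a branch-free 128-bit equality test: XOR of equal 32-bit words is zero, so OR-ing the four (selectively XOR-masked) words and comparing the result with zero checks the whole address against ff02::1 in one go. A more literal equivalent for comparison:

/* Readable equivalent of ipv6_addr_is_ll_all_nodes() (sketch). */
static inline int is_ll_all_nodes_verbose(const struct in6_addr *a)
{
	return a->s6_addr32[0] == htonl(0xff020000) &&
	       a->s6_addr32[1] == 0 &&
	       a->s6_addr32[2] == 0 &&
	       a->s6_addr32[3] == htonl(0x00000001);
}
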
  
  static inline int ipv6_addr_is_ll_all_routers(const struct in6_addr *addr)
  {
 -      return (((addr->s6_addr32[0] ^ htonl(0xff020000)) |
 +      return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
                addr->s6_addr32[1] | addr->s6_addr32[2] |
 -              (addr->s6_addr32[3] ^ htonl(0x00000002))) == 0);
 +              (addr->s6_addr32[3] ^ htonl(0x00000002))) == 0;
  }
  
  extern int __ipv6_isatap_ifid(u8 *eui, __be32 addr);
  
  static inline int ipv6_addr_is_isatap(const struct in6_addr *addr)
  {
 -      return ((addr->s6_addr32[2] | htonl(0x02000000)) == htonl(0x02005EFE));
 +      return (addr->s6_addr32[2] | htonl(0x02000000)) == htonl(0x02005EFE);
  }
  
  #ifdef CONFIG_PROC_FS
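
The addrconf.h hunks above are largely mechanical: redundant outer parentheses are dropped from return expressions and the hold/put macros become typed inline functions, while the word-wise address tests are unchanged. As a quick illustration, here is a minimal standalone sketch (not part of the patch) exercising the same XOR/OR comparison for the all-nodes address ff02::1; it assumes glibc's s6_addr32 accessor for struct in6_addr.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

static int is_ll_all_nodes(const struct in6_addr *addr)
{
        /* Any bit that differs from ff02::1 survives the OR. */
        return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
                addr->s6_addr32[1] | addr->s6_addr32[2] |
                (addr->s6_addr32[3] ^ htonl(0x00000001))) == 0;
}

int main(void)
{
        struct in6_addr a;

        inet_pton(AF_INET6, "ff02::1", &a);
        printf("ff02::1 -> %d\n", is_ll_all_nodes(&a));   /* 1 */
        inet_pton(AF_INET6, "ff02::2", &a);
        printf("ff02::2 -> %d\n", is_ll_all_nodes(&a));   /* 0 */
        return 0;
}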
diff --combined include/net/tcp.h
index bfc1da43295c578d908b26ed852938bd522c8f04,3e4b33e36602caade361654d0561a65b3fba2224..914a60c7ad62070720401f6e18c66af86f7a48e8
@@@ -475,8 -475,22 +475,22 @@@ extern unsigned int tcp_current_mss(str
  /* Bound MSS / TSO packet size with the half of the window */
  static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
  {
-       if (tp->max_window && pktsize > (tp->max_window >> 1))
-               return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
+       int cutoff;
+       /* When peer uses tiny windows, there is no use in packetizing
+        * to sub-MSS pieces for the sake of SWS or making sure there
+        * are enough packets in the pipe for fast recovery.
+        *
+        * On the other hand, for extremely large MSS devices, handling
+        * smaller than MSS windows in this way does make sense.
+        */
+       if (tp->max_window >= 512)
+               cutoff = (tp->max_window >> 1);
+       else
+               cutoff = tp->max_window;
+       if (cutoff && pktsize > cutoff)
+               return max_t(int, cutoff, 68U - tp->tcp_header_len);
        else
                return pktsize;
  }
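
The rewritten tcp_bound_to_half_wnd() stops halving when the peer advertises a window under 512 bytes, since packetizing below an already tiny window helps neither SWS avoidance nor fast recovery. A standalone sketch of the same arithmetic (illustrative only; the 68-byte value is the minimum IPv4 MTU, less the TCP header length):

#include <stdio.h>

static int bound_to_half_wnd(unsigned int max_window,
                             unsigned int tcp_header_len, int pktsize)
{
        int cutoff;
        int floor = (int)(68U - tcp_header_len);

        if (max_window >= 512)
                cutoff = (int)(max_window >> 1);
        else
                cutoff = (int)max_window;

        if (cutoff && pktsize > cutoff)
                return cutoff > floor ? cutoff : floor;
        return pktsize;
}

int main(void)
{
        /* A 256-byte peer window: the old code capped at 128, now 256. */
        printf("%d\n", bound_to_half_wnd(256, 20, 1460));     /* 256 */
        printf("%d\n", bound_to_half_wnd(65535, 20, 1460));   /* 1460 */
        return 0;
}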
@@@ -789,15 -803,6 +803,15 @@@ static inline __u32 tcp_current_ssthres
  /* Use define here intentionally to get WARN_ON location shown at the caller */
  #define tcp_verify_left_out(tp)       WARN_ON(tcp_left_out(tp) > tp->packets_out)
  
 +/*
 + * Convert RFC 3390 larger initial window into an equivalent number of packets.
 + * This is based on the numbers specified in RFC 5681, 3.1.
 + */
 +static inline u32 rfc3390_bytes_to_packets(const u32 smss)
 +{
 +      return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
 +}
 +
  extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
  extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
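
rfc3390_bytes_to_packets() collapses the RFC 5681, section 3.1 rule, IW = min(4*SMSS, max(2*SMSS, 4380 bytes)), into a segment count. A quick standalone check of the thresholds (an illustrative sketch):

#include <stdint.h>
#include <stdio.h>

static uint32_t rfc3390_bytes_to_packets(uint32_t smss)
{
        /* 4 segments up to 1095 bytes, 3 up to 2190, 2 above that */
        return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
}

int main(void)
{
        uint32_t mss[] = { 536, 1095, 1096, 1460, 2190, 2191, 9000 };

        for (unsigned int i = 0; i < sizeof(mss) / sizeof(mss[0]); i++)
                printf("SMSS %4u -> IW %u segments\n",
                       (unsigned)mss[i],
                       (unsigned)rfc3390_bytes_to_packets(mss[i]));
        return 0;
}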
  
diff --combined net/core/dev.c
index 72e99835e5b860335d456fbf486005df1db95388,660dd41aaaa6629c0d59547e04d23353ba935af8..42b200fdf12e42042fa30a45eec6ab33bc2bef8e
  #include <linux/random.h>
  #include <trace/events/napi.h>
  #include <linux/pci.h>
 +#include <linux/inetdevice.h>
  
  #include "net-sysfs.h"
  
@@@ -372,14 -371,6 +372,14 @@@ static inline void netdev_set_addr_lock
   *                                                    --ANK (980803)
   */
  
 +static inline struct list_head *ptype_head(const struct packet_type *pt)
 +{
 +      if (pt->type == htons(ETH_P_ALL))
 +              return &ptype_all;
 +      else
 +              return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
 +}
 +
  /**
   *    dev_add_pack - add packet handler
   *    @pt: packet type declaration
  
  void dev_add_pack(struct packet_type *pt)
  {
 -      int hash;
 +      struct list_head *head = ptype_head(pt);
  
 -      spin_lock_bh(&ptype_lock);
 -      if (pt->type == htons(ETH_P_ALL))
 -              list_add_rcu(&pt->list, &ptype_all);
 -      else {
 -              hash = ntohs(pt->type) & PTYPE_HASH_MASK;
 -              list_add_rcu(&pt->list, &ptype_base[hash]);
 -      }
 -      spin_unlock_bh(&ptype_lock);
 +      spin_lock(&ptype_lock);
 +      list_add_rcu(&pt->list, head);
 +      spin_unlock(&ptype_lock);
  }
  EXPORT_SYMBOL(dev_add_pack);
  
   */
  void __dev_remove_pack(struct packet_type *pt)
  {
 -      struct list_head *head;
 +      struct list_head *head = ptype_head(pt);
        struct packet_type *pt1;
  
 -      spin_lock_bh(&ptype_lock);
 -
 -      if (pt->type == htons(ETH_P_ALL))
 -              head = &ptype_all;
 -      else
 -              head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
 +      spin_lock(&ptype_lock);
  
        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
  
        printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
  out:
 -      spin_unlock_bh(&ptype_lock);
 +      spin_unlock(&ptype_lock);
  }
  EXPORT_SYMBOL(__dev_remove_pack);
  
@@@ -1901,14 -1902,14 +1901,14 @@@ static int dev_gso_segment(struct sk_bu
  
  /*
   * Try to orphan skb early, right before transmission by the device.
 - * We cannot orphan skb if tx timestamp is requested, since
 - * drivers need to call skb_tstamp_tx() to send the timestamp.
 + * We cannot orphan skb if tx timestamp is requested or the sk-reference
 + * is needed on driver level for other reasons, e.g. see net/can/raw.c
   */
  static inline void skb_orphan_try(struct sk_buff *skb)
  {
        struct sock *sk = skb->sk;
  
 -      if (sk && !skb_tx(skb)->flags) {
 +      if (sk && !skb_shinfo(skb)->tx_flags) {
                /* skb_tx_hash() won't be able to get sk.
                 * We copy sk_hash into skb->rxhash
                 */
@@@ -1929,7 -1930,7 +1929,7 @@@ static inline int skb_needs_linearize(s
                                      struct net_device *dev)
  {
        return skb_is_nonlinear(skb) &&
 -             ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
 +             ((skb_has_frag_list(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
                (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
                                              illegal_highdma(dev, skb))));
  }
@@@ -2258,44 -2259,69 +2258,44 @@@ static inline void ____napi_schedule(st
        __raise_softirq_irqoff(NET_RX_SOFTIRQ);
  }
  
 -#ifdef CONFIG_RPS
 -
 -/* One global table that all flow-based protocols share. */
 -struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
 -EXPORT_SYMBOL(rps_sock_flow_table);
 -
  /*
 - * get_rps_cpu is called from netif_receive_skb and returns the target
 - * CPU from the RPS map of the receiving queue for a given skb.
 - * rcu_read_lock must be held on entry.
 + * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
 + * and src/dst port numbers. Returns a non-zero hash number on success
 + * and 0 on failure.
   */
 -static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 -                     struct rps_dev_flow **rflowp)
 +__u32 __skb_get_rxhash(struct sk_buff *skb)
  {
 +      int nhoff, hash = 0, poff;
        struct ipv6hdr *ip6;
        struct iphdr *ip;
 -      struct netdev_rx_queue *rxqueue;
 -      struct rps_map *map;
 -      struct rps_dev_flow_table *flow_table;
 -      struct rps_sock_flow_table *sock_flow_table;
 -      int cpu = -1;
        u8 ip_proto;
 -      u16 tcpu;
        u32 addr1, addr2, ihl;
        union {
                u32 v32;
                u16 v16[2];
        } ports;
  
 -      if (skb_rx_queue_recorded(skb)) {
 -              u16 index = skb_get_rx_queue(skb);
 -              if (unlikely(index >= dev->num_rx_queues)) {
 -                      WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
 -                              "on queue %u, but number of RX queues is %u\n",
 -                              dev->name, index, dev->num_rx_queues);
 -                      goto done;
 -              }
 -              rxqueue = dev->_rx + index;
 -      } else
 -              rxqueue = dev->_rx;
 -
 -      if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
 -              goto done;
 -
 -      if (skb->rxhash)
 -              goto got_hash; /* Skip hash computation on packet header */
 +      nhoff = skb_network_offset(skb);
  
        switch (skb->protocol) {
        case __constant_htons(ETH_P_IP):
 -              if (!pskb_may_pull(skb, sizeof(*ip)))
 +              if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
                        goto done;
  
 -              ip = (struct iphdr *) skb->data;
 -              ip_proto = ip->protocol;
 +              ip = (struct iphdr *) (skb->data + nhoff);
 +              if (ip->frag_off & htons(IP_MF | IP_OFFSET))
 +                      ip_proto = 0;
 +              else
 +                      ip_proto = ip->protocol;
                addr1 = (__force u32) ip->saddr;
                addr2 = (__force u32) ip->daddr;
                ihl = ip->ihl;
                break;
        case __constant_htons(ETH_P_IPV6):
 -              if (!pskb_may_pull(skb, sizeof(*ip6)))
 +              if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
                        goto done;
  
 -              ip6 = (struct ipv6hdr *) skb->data;
 +              ip6 = (struct ipv6hdr *) (skb->data + nhoff);
                ip_proto = ip6->nexthdr;
                addr1 = (__force u32) ip6->saddr.s6_addr32[3];
               addr2 = (__force u32) ip6->daddr.s6_addr32[3];
               ihl = (40 >> 2);
               break;
        default:
                goto done;
        }
 -      switch (ip_proto) {
 -      case IPPROTO_TCP:
 -      case IPPROTO_UDP:
 -      case IPPROTO_DCCP:
 -      case IPPROTO_ESP:
 -      case IPPROTO_AH:
 -      case IPPROTO_SCTP:
 -      case IPPROTO_UDPLITE:
 -              if (pskb_may_pull(skb, (ihl * 4) + 4)) {
 -                      ports.v32 = * (__force u32 *) (skb->data + (ihl * 4));
 +
 +      ports.v32 = 0;
 +      poff = proto_ports_offset(ip_proto);
 +      if (poff >= 0) {
 +              nhoff += ihl * 4 + poff;
 +              if (pskb_may_pull(skb, nhoff + 4)) {
 +                      ports.v32 = * (__force u32 *) (skb->data + nhoff);
                        if (ports.v16[1] < ports.v16[0])
                                swap(ports.v16[0], ports.v16[1]);
 -                      break;
                }
 -      default:
 -              ports.v32 = 0;
 -              break;
        }
  
        /* get a consistent hash (same value on both flow directions) */
        if (addr2 < addr1)
                swap(addr1, addr2);
 -      skb->rxhash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
 -      if (!skb->rxhash)
 -              skb->rxhash = 1;
  
 -got_hash:
 +      hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
 +      if (!hash)
 +              hash = 1;
 +
 +done:
 +      return hash;
 +}
 +EXPORT_SYMBOL(__skb_get_rxhash);
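
__skb_get_rxhash() must hash both directions of a flow to the same value, hence the canonical ordering of the address pair and the port pair before mixing. A standalone sketch of that symmetry, with a stand-in mixer in place of the kernel's seeded jhash_3words():

#include <stdint.h>
#include <stdio.h>

static uint32_t mix(uint32_t a, uint32_t b, uint32_t c)
{
        /* Stand-in mixer, NOT the kernel's jhash_3words(). */
        uint32_t h = a * 0x9e3779b1u;

        h ^= b + 0x85ebca6bu + (h << 6) + (h >> 2);
        h ^= c + 0xc2b2ae35u + (h << 6) + (h >> 2);
        return h ? h : 1;       /* 0 is reserved for "no hash" */
}

static uint32_t flow_hash(uint32_t saddr, uint32_t daddr,
                          uint16_t sport, uint16_t dport)
{
        uint32_t a1 = saddr, a2 = daddr;
        uint16_t p1 = sport, p2 = dport;

        if (a2 < a1) { uint32_t t = a1; a1 = a2; a2 = t; }
        if (p2 < p1) { uint16_t t = p1; p1 = p2; p2 = t; }
        return mix(a1, a2, ((uint32_t)p2 << 16) | p1);
}

int main(void)
{
        /* Both directions of the flow produce the same hash. */
        printf("%08x\n", flow_hash(0x0a000001, 0x0a000002, 12345, 80));
        printf("%08x\n", flow_hash(0x0a000002, 0x0a000001, 80, 12345));
        return 0;
}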
 +
 +#ifdef CONFIG_RPS
 +
 +/* One global table that all flow-based protocols share. */
 +struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
 +EXPORT_SYMBOL(rps_sock_flow_table);
 +
 +/*
 + * get_rps_cpu is called from netif_receive_skb and returns the target
 + * CPU from the RPS map of the receiving queue for a given skb.
 + * rcu_read_lock must be held on entry.
 + */
 +static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 +                     struct rps_dev_flow **rflowp)
 +{
 +      struct netdev_rx_queue *rxqueue;
 +      struct rps_map *map = NULL;
 +      struct rps_dev_flow_table *flow_table;
 +      struct rps_sock_flow_table *sock_flow_table;
 +      int cpu = -1;
 +      u16 tcpu;
 +
 +      if (skb_rx_queue_recorded(skb)) {
 +              u16 index = skb_get_rx_queue(skb);
 +              if (unlikely(index >= dev->num_rx_queues)) {
 +                      WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
 +                              "on queue %u, but number of RX queues is %u\n",
 +                              dev->name, index, dev->num_rx_queues);
 +                      goto done;
 +              }
 +              rxqueue = dev->_rx + index;
 +      } else
 +              rxqueue = dev->_rx;
 +
 +      if (rxqueue->rps_map) {
 +              map = rcu_dereference(rxqueue->rps_map);
 +              if (map && map->len == 1) {
 +                      tcpu = map->cpus[0];
 +                      if (cpu_online(tcpu))
 +                              cpu = tcpu;
 +                      goto done;
 +              }
 +      } else if (!rxqueue->rps_flow_table) {
 +              goto done;
 +      }
 +
 +      skb_reset_network_header(skb);
 +      if (!skb_get_rxhash(skb))
 +              goto done;
 +
        flow_table = rcu_dereference(rxqueue->rps_flow_table);
        sock_flow_table = rcu_dereference(rps_sock_flow_table);
        if (flow_table && sock_flow_table) {
                }
        }
  
 -      map = rcu_dereference(rxqueue->rps_map);
        if (map) {
                tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
  
@@@ -2848,8 -2828,8 +2848,8 @@@ static int __netif_receive_skb(struct s
        if (!netdev_tstamp_prequeue)
                net_timestamp_check(skb);
  
 -      if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
 -              return NET_RX_SUCCESS;
 +      if (vlan_tx_tag_present(skb))
 +              vlan_hwaccel_do_receive(skb);
  
        /* if we've gotten here through NAPI, check netpoll */
        if (netpoll_receive_skb(skb))
@@@ -3070,7 -3050,7 +3070,7 @@@ out
        return netif_receive_skb(skb);
  }
  
 -static void napi_gro_flush(struct napi_struct *napi)
 +inline void napi_gro_flush(struct napi_struct *napi)
  {
        struct sk_buff *skb, *next;
  
        napi->gro_count = 0;
        napi->gro_list = NULL;
  }
 +EXPORT_SYMBOL(napi_gro_flush);
  
  enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
  {
        if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
                goto normal;
  
 -      if (skb_is_gso(skb) || skb_has_frags(skb))
 +      if (skb_is_gso(skb) || skb_has_frag_list(skb))
                goto normal;
  
        rcu_read_lock();
@@@ -3177,18 -3156,16 +3177,18 @@@ normal
  }
  EXPORT_SYMBOL(dev_gro_receive);
  
 -static gro_result_t
 +static inline gro_result_t
  __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
  {
        struct sk_buff *p;
  
        for (p = napi->gro_list; p; p = p->next) {
 -              NAPI_GRO_CB(p)->same_flow =
 -                      (p->dev == skb->dev) &&
 -                      !compare_ether_header(skb_mac_header(p),
 +              unsigned long diffs;
 +
 +              diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
 +              diffs |= compare_ether_header(skb_mac_header(p),
                                              skb_gro_mac_header(skb));
 +              NAPI_GRO_CB(p)->same_flow = !diffs;
                NAPI_GRO_CB(p)->flush = 0;
        }
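
The same_flow computation above accumulates every mismatch into a single word with XOR/OR and tests once at the end, rather than short-circuiting per comparison. The pattern in isolation (an illustrative sketch):

#include <stdio.h>
#include <string.h>

struct hdr { unsigned char mac[12]; };

/* Accumulate every difference into one word; test once at the end. */
static int same_flow(const void *dev_a, const void *dev_b,
                     const struct hdr *h_a, const struct hdr *h_b)
{
        unsigned long diffs;

        diffs  = (unsigned long)dev_a ^ (unsigned long)dev_b;
        diffs |= (unsigned long)!!memcmp(h_a->mac, h_b->mac,
                                         sizeof(h_a->mac));
        return !diffs;
}

int main(void)
{
        struct hdr a = { { 0xde, 0xad, 0xbe } }, b = a;
        int dev1, dev2;

        printf("%d\n", same_flow(&dev1, &dev1, &a, &b));        /* 1 */
        printf("%d\n", same_flow(&dev1, &dev2, &a, &b));        /* 0 */
        return 0;
}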
  
@@@ -4868,7 -4845,7 +4868,7 @@@ static void rollback_registered_many(st
        dev = list_first_entry(head, struct net_device, unreg_list);
        call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
  
-       synchronize_net();
+       rcu_barrier();
  
        list_for_each_entry(dev, head, unreg_list)
                dev_put(dev);
@@@ -4964,34 -4941,6 +4964,34 @@@ void netif_stacked_transfer_operstate(c
  }
  EXPORT_SYMBOL(netif_stacked_transfer_operstate);
  
 +static int netif_alloc_rx_queues(struct net_device *dev)
 +{
 +#ifdef CONFIG_RPS
 +      unsigned int i, count = dev->num_rx_queues;
 +
 +      if (count) {
 +              struct netdev_rx_queue *rx;
 +
 +              rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
 +              if (!rx) {
 +                      pr_err("netdev: Unable to allocate %u rx queues.\n",
 +                             count);
 +                      return -ENOMEM;
 +              }
 +              dev->_rx = rx;
 +              atomic_set(&rx->count, count);
 +
 +              /*
 +               * Set a pointer to first element in the array which holds the
 +               * reference count.
 +               */
 +              for (i = 0; i < count; i++)
 +                      rx[i].first = rx;
 +      }
 +#endif
 +      return 0;
 +}
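
netif_alloc_rx_queues() centralizes what register_netdevice() and alloc_netdev_mq() previously did separately: a single contiguous array whose first element carries the reference count, with every element pointing back at it. A userspace sketch of that layout (a simplification, not the kernel's exact struct):

#include <stdio.h>
#include <stdlib.h>

struct rx_queue {
        struct rx_queue *first; /* element 0 of the array */
        int count;              /* meaningful in element 0 only */
};

static struct rx_queue *alloc_rx_queues(unsigned int count)
{
        struct rx_queue *rx = calloc(count, sizeof(*rx));

        if (!rx)
                return NULL;
        rx->count = count;
        for (unsigned int i = 0; i < count; i++)
                rx[i].first = rx;       /* any element can find the count */
        return rx;
}

int main(void)
{
        struct rx_queue *rx = alloc_rx_queues(4);

        if (rx)
                printf("queue 3 sees count %d\n", rx[3].first->count); /* 4 */
        free(rx);
        return 0;
}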
 +
  /**
   *    register_netdevice      - register a network device
   *    @dev: device to register
@@@ -5029,10 -4978,24 +5029,10 @@@ int register_netdevice(struct net_devic
  
        dev->iflink = -1;
  
 -#ifdef CONFIG_RPS
 -      if (!dev->num_rx_queues) {
 -              /*
 -               * Allocate a single RX queue if driver never called
 -               * alloc_netdev_mq
 -               */
 -
 -              dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL);
 -              if (!dev->_rx) {
 -                      ret = -ENOMEM;
 -                      goto out;
 -              }
 +      ret = netif_alloc_rx_queues(dev);
 +      if (ret)
 +              goto out;
  
 -              dev->_rx->first = dev->_rx;
 -              atomic_set(&dev->_rx->count, 1);
 -              dev->num_rx_queues = 1;
 -      }
 -#endif
        /* Init, if this function is available */
        if (dev->netdev_ops->ndo_init) {
                ret = dev->netdev_ops->ndo_init(dev);
        if (dev->features & NETIF_F_SG)
                dev->features |= NETIF_F_GSO;
  
 +      /* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
 +       * vlan_dev_init() will do the dev->features check, so these features
 +       * are enabled only if supported by underlying device.
 +       */
 +      dev->vlan_features |= (NETIF_F_GRO | NETIF_F_HIGHDMA);
 +
        ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
        ret = notifier_to_errno(ret);
        if (ret)
@@@ -5307,7 -5264,7 +5307,7 @@@ void netdev_run_todo(void
  
                /* paranoia */
                BUG_ON(atomic_read(&dev->refcnt));
 -              WARN_ON(dev->ip_ptr);
 +              WARN_ON(rcu_dereference_raw(dev->ip_ptr));
                WARN_ON(dev->ip6_ptr);
                WARN_ON(dev->dn_ptr);
  
@@@ -5429,6 -5386,10 +5429,6 @@@ struct net_device *alloc_netdev_mq(int 
        struct net_device *dev;
        size_t alloc_size;
        struct net_device *p;
 -#ifdef CONFIG_RPS
 -      struct netdev_rx_queue *rx;
 -      int i;
 -#endif
  
        BUG_ON(strlen(name) >= sizeof(dev->name));
  
                goto free_p;
        }
  
 -#ifdef CONFIG_RPS
 -      rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
 -      if (!rx) {
 -              printk(KERN_ERR "alloc_netdev: Unable to allocate "
 -                     "rx queues.\n");
 -              goto free_tx;
 -      }
 -
 -      atomic_set(&rx->count, queue_count);
 -
 -      /*
 -       * Set a pointer to first element in the array which holds the
 -       * reference count.
 -       */
 -      for (i = 0; i < queue_count; i++)
 -              rx[i].first = rx;
 -#endif
  
        dev = PTR_ALIGN(p, NETDEV_ALIGN);
        dev->padded = (char *)dev - (char *)p;
  
        if (dev_addr_init(dev))
 -              goto free_rx;
 +              goto free_tx;
  
        dev_mc_init(dev);
        dev_uc_init(dev);
        dev->real_num_tx_queues = queue_count;
  
  #ifdef CONFIG_RPS
 -      dev->_rx = rx;
        dev->num_rx_queues = queue_count;
  #endif
  
        strcpy(dev->name, name);
        return dev;
  
 -free_rx:
 -#ifdef CONFIG_RPS
 -      kfree(rx);
  free_tx:
 -#endif
        kfree(tx);
  free_p:
        kfree(p);
@@@ -5675,10 -5658,6 +5675,10 @@@ int dev_change_net_namespace(struct net
  
        /* Notify protocols, that we are about to destroy
           this device. They should clean all the things.
 +
 +         Note that dev->reg_state stays at NETREG_REGISTERED.
 +         This is wanted because this way 8021q and macvlan know
 +         the device is just moving and can keep their slaves up.
        */
        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
        call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
diff --combined net/core/sock.c
index f3a06c40d5e023585852f3bf031c443a86cc5d7f,ef30e9d286e703cccaffbd5ee10394cc167fe057..42365deeba279c9b625dfb00213811722855046c
@@@ -1351,9 -1351,9 +1351,9 @@@ int sock_i_uid(struct sock *sk
  {
        int uid;
  
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
        return uid;
  }
  EXPORT_SYMBOL(sock_i_uid);
@@@ -1362,9 -1362,9 +1362,9 @@@ unsigned long sock_i_ino(struct sock *s
  {
        unsigned long ino;
  
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
        return ino;
  }
  EXPORT_SYMBOL(sock_i_ino);
@@@ -1557,8 -1557,6 +1557,8 @@@ struct sk_buff *sock_alloc_send_skb(str
  EXPORT_SYMBOL(sock_alloc_send_skb);
  
  static void __lock_sock(struct sock *sk)
 +      __releases(&sk->sk_lock.slock)
 +      __acquires(&sk->sk_lock.slock)
  {
        DEFINE_WAIT(wait);
  
  }
  
  static void __release_sock(struct sock *sk)
 +      __releases(&sk->sk_lock.slock)
 +      __acquires(&sk->sk_lock.slock)
  {
        struct sk_buff *skb = sk->sk_backlog.head;
  
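
The read_lock() to read_lock_bh() conversions here, and in the RDS hunks further down, prevent a softirq writer from interrupting a process-context reader on the same CPU and spinning forever on the held rwlock. A loose userspace analogue (a sketch only; a masked signal stands in for disabled bottom halves):

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void lock_bh(sigset_t *old)
{
        sigset_t set;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);       /* the "softirq" source */
        pthread_sigmask(SIG_BLOCK, &set, old);
        pthread_mutex_lock(&lock);
}

static void unlock_bh(const sigset_t *old)
{
        pthread_mutex_unlock(&lock);
        pthread_sigmask(SIG_SETMASK, old, NULL);
}

int main(void)
{
        sigset_t old;

        lock_bh(&old);
        printf("critical section, signal masked\n");
        unlock_bh(&old);
        return 0;
}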
diff --combined net/ipv4/ip_gre.c
index 0967d02fefd87adc4e7b4931f656ab1945ccecf0,35c93e8b6a4694561c838641e546b609e8cbfaa0..5d6ddcb7403b487ea1fa347595685f7f73061803
@@@ -44,9 -44,8 +44,9 @@@
  #include <net/net_namespace.h>
  #include <net/netns/generic.h>
  #include <net/rtnetlink.h>
 +#include <net/gre.h>
  
- #ifdef CONFIG_IPV6
+ #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
  #include <net/ipv6.h>
  #include <net/ip6_fib.h>
  #include <net/ip6_route.h>
@@@ -129,7 -128,7 +129,7 @@@ static int ipgre_tunnel_bind_dev(struc
  
  static int ipgre_net_id __read_mostly;
  struct ipgre_net {
 -      struct ip_tunnel *tunnels[4][HASH_SIZE];
 +      struct ip_tunnel __rcu *tunnels[4][HASH_SIZE];
  
        struct net_device *fb_tunnel_dev;
  };
  #define tunnels_l     tunnels[1]
  #define tunnels_wc    tunnels[0]
  /*
 - * Locking : hash tables are protected by RCU and a spinlock
 + * Locking : hash tables are protected by RCU and RTNL
   */
 -static DEFINE_SPINLOCK(ipgre_lock);
  
  #define for_each_ip_tunnel_rcu(start) \
        for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
@@@ -173,8 -173,8 +173,8 @@@ static struct ip_tunnel * ipgre_tunnel_
  {
        struct net *net = dev_net(dev);
        int link = dev->ifindex;
 -      unsigned h0 = HASH(remote);
 -      unsigned h1 = HASH(key);
 +      unsigned int h0 = HASH(remote);
 +      unsigned int h1 = HASH(key);
        struct ip_tunnel *t, *cand = NULL;
        struct ipgre_net *ign = net_generic(net, ipgre_net_id);
        int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
        return NULL;
  }
  
 -static struct ip_tunnel **__ipgre_bucket(struct ipgre_net *ign,
 +static struct ip_tunnel __rcu **__ipgre_bucket(struct ipgre_net *ign,
                struct ip_tunnel_parm *parms)
  {
        __be32 remote = parms->iph.daddr;
        __be32 local = parms->iph.saddr;
        __be32 key = parms->i_key;
 -      unsigned h = HASH(key);
 +      unsigned int h = HASH(key);
        int prio = 0;
  
        if (local)
        return &ign->tunnels[prio][h];
  }
  
 -static inline struct ip_tunnel **ipgre_bucket(struct ipgre_net *ign,
 +static inline struct ip_tunnel __rcu **ipgre_bucket(struct ipgre_net *ign,
                struct ip_tunnel *t)
  {
        return __ipgre_bucket(ign, &t->parms);
  
  static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
  {
 -      struct ip_tunnel **tp = ipgre_bucket(ign, t);
 +      struct ip_tunnel __rcu **tp = ipgre_bucket(ign, t);
  
 -      spin_lock_bh(&ipgre_lock);
 -      t->next = *tp;
 +      rcu_assign_pointer(t->next, rtnl_dereference(*tp));
        rcu_assign_pointer(*tp, t);
 -      spin_unlock_bh(&ipgre_lock);
  }
  
  static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
  {
 -      struct ip_tunnel **tp;
 -
 -      for (tp = ipgre_bucket(ign, t); *tp; tp = &(*tp)->next) {
 -              if (t == *tp) {
 -                      spin_lock_bh(&ipgre_lock);
 -                      *tp = t->next;
 -                      spin_unlock_bh(&ipgre_lock);
 +      struct ip_tunnel __rcu **tp;
 +      struct ip_tunnel *iter;
 +
 +      for (tp = ipgre_bucket(ign, t);
 +           (iter = rtnl_dereference(*tp)) != NULL;
 +           tp = &iter->next) {
 +              if (t == iter) {
 +                      rcu_assign_pointer(*tp, t->next);
                        break;
                }
        }
@@@ -345,13 -346,10 +345,13 @@@ static struct ip_tunnel *ipgre_tunnel_f
        __be32 local = parms->iph.saddr;
        __be32 key = parms->i_key;
        int link = parms->link;
 -      struct ip_tunnel *t, **tp;
 +      struct ip_tunnel *t;
 +      struct ip_tunnel __rcu **tp;
        struct ipgre_net *ign = net_generic(net, ipgre_net_id);
  
 -      for (tp = __ipgre_bucket(ign, parms); (t = *tp) != NULL; tp = &t->next)
 +      for (tp = __ipgre_bucket(ign, parms);
 +           (t = rtnl_dereference(*tp)) != NULL;
 +           tp = &t->next)
                if (local == t->parms.iph.saddr &&
                    remote == t->parms.iph.daddr &&
                    key == t->parms.i_key &&
        return t;
  }
  
 -static struct ip_tunnel * ipgre_tunnel_locate(struct net *net,
 +static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
                struct ip_tunnel_parm *parms, int create)
  {
        struct ip_tunnel *t, *nt;
@@@ -647,11 -645,9 +647,11 @@@ static int ipgre_rcv(struct sk_buff *sk
                skb_reset_network_header(skb);
                ipgre_ecn_decapsulate(iph, skb);
  
 -              netif_rx(skb);
 +              if (netif_rx(skb) == NET_RX_DROP)
 +                      stats->rx_dropped++;
 +
                rcu_read_unlock();
 -              return(0);
 +              return 0;
        }
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
  
@@@ -659,7 -655,7 +659,7 @@@ drop
        rcu_read_unlock();
  drop_nolock:
        kfree_skb(skb);
 -      return(0);
 +      return 0;
  }
  
  static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
        u8     tos;
        __be16 df;
        struct rtable *rt;                      /* Route to the other host */
 -      struct net_device *tdev;                        /* Device to other host */
 +      struct net_device *tdev;                /* Device to other host */
        struct iphdr  *iph;                     /* Our new IP header */
        unsigned int max_headroom;              /* The extra header space needed */
        int    gre_hlen;
                        if ((dst = rt->rt_gateway) == 0)
                                goto tx_error_icmp;
                }
- #ifdef CONFIG_IPV6
+ #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (skb->protocol == htons(ETH_P_IPV6)) {
                        struct in6_addr *addr6;
                        int addr_type;
                        goto tx_error;
                }
        }
- #ifdef CONFIG_IPV6
+ #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
  
        if ((iph->ttl = tiph->ttl) == 0) {
                if (skb->protocol == htons(ETH_P_IP))
                        iph->ttl = old_iph->ttl;
- #ifdef CONFIG_IPV6
+ #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
  #endif
@@@ -1016,7 -1012,7 +1016,7 @@@ ipgre_tunnel_ioctl (struct net_device *
                                        break;
                                }
                        } else {
 -                              unsigned nflags = 0;
 +                              unsigned int nflags = 0;
  
                                t = netdev_priv(dev);
  
@@@ -1129,7 -1125,7 +1129,7 @@@ static int ipgre_tunnel_change_mtu(stru
  
  static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
                        unsigned short type,
 -                      const void *daddr, const void *saddr, unsigned len)
 +                      const void *daddr, const void *saddr, unsigned int len)
  {
        struct ip_tunnel *t = netdev_priv(dev);
        struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
@@@ -1278,13 -1274,14 +1278,13 @@@ static void ipgre_fb_tunnel_init(struc
        tunnel->hlen            = sizeof(struct iphdr) + 4;
  
        dev_hold(dev);
 -      ign->tunnels_wc[0]      = tunnel;
 +      rcu_assign_pointer(ign->tunnels_wc[0], tunnel);
  }
  
  
 -static const struct net_protocol ipgre_protocol = {
 -      .handler        =       ipgre_rcv,
 -      .err_handler    =       ipgre_err,
 -      .netns_ok       =       1,
 +static const struct gre_protocol ipgre_protocol = {
 +      .handler     = ipgre_rcv,
 +      .err_handler = ipgre_err,
  };
  
  static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
        for (prio = 0; prio < 4; prio++) {
                int h;
                for (h = 0; h < HASH_SIZE; h++) {
 -                      struct ip_tunnel *t = ign->tunnels[prio][h];
 +                      struct ip_tunnel *t;
 +
 +                      t = rtnl_dereference(ign->tunnels[prio][h]);
  
                        while (t != NULL) {
                                unregister_netdevice_queue(t->dev, head);
 -                              t = t->next;
 +                              t = rtnl_dereference(t->next);
                        }
                }
        }
@@@ -1527,7 -1522,7 +1527,7 @@@ static int ipgre_changelink(struct net_
                t = nt;
  
                if (dev->type != ARPHRD_ETHER) {
 -                      unsigned nflags = 0;
 +                      unsigned int nflags = 0;
  
                        if (ipv4_is_multicast(p.iph.daddr))
                                nflags = IFF_BROADCAST;
@@@ -1668,7 -1663,7 +1668,7 @@@ static int __init ipgre_init(void
        if (err < 0)
                return err;
  
 -      err = inet_add_protocol(&ipgre_protocol, IPPROTO_GRE);
 +      err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
        if (err < 0) {
                printk(KERN_INFO "ipgre init: can't add protocol\n");
                goto add_proto_failed;
@@@ -1688,7 -1683,7 +1688,7 @@@ out
  tap_ops_failed:
        rtnl_link_unregister(&ipgre_link_ops);
  rtnl_link_failed:
 -      inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
 +      gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
  add_proto_failed:
        unregister_pernet_device(&ipgre_net_ops);
        goto out;
@@@ -1698,7 -1693,7 +1698,7 @@@ static void __exit ipgre_fini(void
  {
        rtnl_link_unregister(&ipgre_tap_ops);
        rtnl_link_unregister(&ipgre_link_ops);
 -      if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
 +      if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
                printk(KERN_INFO "ipgre close: can't remove protocol\n");
        unregister_pernet_device(&ipgre_net_ops);
  }
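
The GRE hunks retire ipgre_lock: lookups already run under RCU and all writers hold RTNL, so publication needs only rcu_assign_pointer() (a release store) with rtnl_dereference() on the writer side. A C11 userspace analogue of that discipline (a sketch; it omits the grace period a real unlink would need before freeing):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct tunnel {
        int key;
        struct tunnel *_Atomic next;
};

static struct tunnel *_Atomic head;
static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

static void tunnel_link(struct tunnel *t)       /* writer, holds "RTNL" */
{
        pthread_mutex_lock(&rtnl);
        /* fully initialize first, then publish with a release store */
        atomic_store_explicit(&t->next,
                atomic_load_explicit(&head, memory_order_relaxed),
                memory_order_relaxed);
        atomic_store_explicit(&head, t, memory_order_release);
        pthread_mutex_unlock(&rtnl);
}

static struct tunnel *tunnel_find(int key)      /* lockless reader */
{
        struct tunnel *t = atomic_load_explicit(&head, memory_order_acquire);

        for (; t; t = atomic_load_explicit(&t->next, memory_order_acquire))
                if (t->key == key)
                        return t;
        return NULL;
}

int main(void)
{
        struct tunnel *t = calloc(1, sizeof(*t));

        t->key = 42;
        tunnel_link(t);
        printf("found key %d\n", tunnel_find(42)->key);
        free(t);
        return 0;
}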
diff --combined net/ipv4/ip_output.c
index 3551b6dc741905f59528d1eee49e0e9437974225,7649d7750075d184896a9da6d37ed9a35ea5f403..439d2a34ee4411b932eefb3a6fc51383e8db7125
@@@ -487,10 -487,9 +487,9 @@@ int ip_fragment(struct sk_buff *skb, in
         * LATER: this step can be merged to real generation of fragments,
         * we can switch to copy when see the first bad fragment.
         */
 -      if (skb_has_frags(skb)) {
 +      if (skb_has_frag_list(skb)) {
-               struct sk_buff *frag;
+               struct sk_buff *frag, *frag2;
                int first_len = skb_pagelen(skb);
-               int truesizes = 0;
  
                if (first_len - hlen > mtu ||
                    ((first_len - hlen) & 7) ||
                        if (frag->len > mtu ||
                            ((frag->len & 7) && frag->next) ||
                            skb_headroom(frag) < hlen)
-                           goto slow_path;
+                               goto slow_path_clean;
  
                        /* Partially cloned skb? */
                        if (skb_shared(frag))
-                               goto slow_path;
+                               goto slow_path_clean;
  
                        BUG_ON(frag->sk);
                        if (skb->sk) {
                                frag->sk = skb->sk;
                                frag->destructor = sock_wfree;
                        }
-                       truesizes += frag->truesize;
+                       skb->truesize -= frag->truesize;
                }
  
                /* Everything is OK. Generate! */
                frag = skb_shinfo(skb)->frag_list;
                skb_frag_list_init(skb);
                skb->data_len = first_len - skb_headlen(skb);
-               skb->truesize -= truesizes;
                skb->len = first_len;
                iph->tot_len = htons(first_len);
                iph->frag_off = htons(IP_MF);
                }
                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
                return err;
+ slow_path_clean:
+               skb_walk_frags(skb, frag2) {
+                       if (frag2 == frag)
+                               break;
+                       frag2->sk = NULL;
+                       frag2->destructor = NULL;
+                       skb->truesize += frag2->truesize;
+               }
        }
  
  slow_path:
@@@ -837,9 -844,10 +844,9 @@@ int ip_append_data(struct sock *sk
                inet->cork.length = 0;
                sk->sk_sndmsg_page = NULL;
                sk->sk_sndmsg_off = 0;
 -              if ((exthdrlen = rt->dst.header_len) != 0) {
 -                      length += exthdrlen;
 -                      transhdrlen += exthdrlen;
 -              }
 +              exthdrlen = rt->dst.header_len;
 +              length += exthdrlen;
 +              transhdrlen += exthdrlen;
        } else {
                rt = (struct rtable *)inet->cork.dst;
                if (inet->cork.flags & IPCORK_OPT)
@@@ -926,19 -934,16 +933,19 @@@ alloc_new_skb
                            !(rt->dst.dev->features&NETIF_F_SG))
                                alloclen = mtu;
                        else
 -                              alloclen = datalen + fragheaderlen;
 +                              alloclen = fraglen;
  
                        /* The last fragment gets additional space at tail.
                         * Note, with MSG_MORE we overallocate on fragments,
                         * because we have no idea what fragment will be
                         * the last.
                         */
 -                      if (datalen == length + fraggap)
 +                      if (datalen == length + fraggap) {
                                alloclen += rt->dst.trailer_len;
 -
 +                              /* make sure mtu is not reached */
 +                              if (datalen > mtu - fragheaderlen - rt->dst.trailer_len)
 +                                      datalen -= ALIGN(rt->dst.trailer_len, 8);
 +                      }
                        if (transhdrlen) {
                                skb = sock_alloc_send_skb(sk,
                                                alloclen + hh_len + 15,
                                else
                                        /* only the initial fragment is
                                           time stamped */
 -                                      ipc->shtx.flags = 0;
 +                                      ipc->tx_flags = 0;
                        }
                        if (skb == NULL)
                                goto error;
                        skb->ip_summed = csummode;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);
 -                      *skb_tx(skb) = ipc->shtx;
 +                      skb_shinfo(skb)->tx_flags = ipc->tx_flags;
  
                        /*
                         *      Find where to start putting bytes.
@@@ -1386,7 -1391,7 +1393,7 @@@ void ip_send_reply(struct sock *sk, str
  
        daddr = ipc.addr = rt->rt_src;
        ipc.opt = NULL;
 -      ipc.shtx.flags = 0;
 +      ipc.tx_flags = 0;
  
        if (replyopts.opt.optlen) {
                ipc.opt = &replyopts.opt;
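
The fragmentation fix charges each fragment's truesize as the fragment is claimed and, if validation fails partway, walks the already-claimed prefix undoing the sk, destructor, and truesize changes before falling back to the slow path; previously a late bailout could leave the accounting skewed. The shape of that fix in isolation (an illustrative sketch):

#include <stdio.h>

struct frag { int len; int owned; };

static int fast_path_check(struct frag *f, int n, int mtu, int *truesize)
{
        int i;

        for (i = 0; i < n; i++) {
                if (f[i].len > mtu)
                        goto clean;     /* undo what we already did */
                f[i].owned = 1;         /* side effect, like frag->sk */
                *truesize -= f[i].len;
        }
        return 1;
clean:
        while (--i >= 0) {
                f[i].owned = 0;
                *truesize += f[i].len;
        }
        return 0;
}

int main(void)
{
        struct frag f[3] = { { 100, 0 }, { 200, 0 }, { 9000, 0 } };
        int truesize = 10000;
        int ok = fast_path_check(f, 3, 1500, &truesize);

        /* fails on the oversized fragment, accounting fully restored */
        printf("fast path ok: %d, truesize %d\n", ok, truesize);
        return 0;
}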
diff --combined net/ipv4/tcp.c
index 3e8a4dbc721b849e88fed7cb6fa32273b02017aa,95d75d443927a9b0f06cd9934ea5a3749e5ff09f..19192c5fe67a52eed8381d6f36ce690eb0fef751
@@@ -386,8 -386,6 +386,6 @@@ unsigned int tcp_poll(struct file *file
         */
  
        mask = 0;
-       if (sk->sk_err)
-               mask = POLLERR;
  
        /*
         * POLLHUP is certainly not done right. But poll() doesn't
                if (tp->urg_data & TCP_URG_VALID)
                        mask |= POLLPRI;
        }
+       /* This barrier is coupled with smp_wmb() in tcp_reset() */
+       smp_rmb();
+       if (sk->sk_err)
+               mask |= POLLERR;
        return mask;
  }
  EXPORT_SYMBOL(tcp_poll);
@@@ -2389,12 -2392,7 +2392,12 @@@ static int do_tcp_setsockopt(struct soc
                err = tp->af_specific->md5_parse(sk, optval, optlen);
                break;
  #endif
 -
 +      case TCP_USER_TIMEOUT:
 +              /* Cap the max timeout in ms TCP will retry/retrans
 +               * before giving up and aborting (ETIMEDOUT) a connection.
 +               */
 +              icsk->icsk_user_timeout = msecs_to_jiffies(val);
 +              break;
        default:
                err = -ENOPROTOOPT;
                break;
@@@ -2613,10 -2611,6 +2616,10 @@@ static int do_tcp_getsockopt(struct soc
        case TCP_THIN_DUPACK:
                val = tp->thin_dupack;
                break;
 +
 +      case TCP_USER_TIMEOUT:
 +              val = jiffies_to_msecs(icsk->icsk_user_timeout);
 +              break;
        default:
                return -ENOPROTOOPT;
        }
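
TCP_USER_TIMEOUT caps, in milliseconds, how long TCP keeps retransmitting unacknowledged data before aborting the connection with ETIMEDOUT. From userspace it is an ordinary socket option (a sketch; the fallback define matches the option number this patch introduces):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef TCP_USER_TIMEOUT
#define TCP_USER_TIMEOUT 18     /* option number used by this patch */
#endif

int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        unsigned int ms = 30000; /* give up after 30s of unacked data */

        if (fd < 0)
                return 1;
        if (setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &ms, sizeof(ms)))
                perror("setsockopt"); /* ENOPROTOOPT on kernels without it */
        close(fd);
        return 0;
}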
diff --combined net/ipv4/tcp_input.c
index 51966b3f9719c8e3a7249a2d3c817eb2439cb136,149e79ac289181fba7cb375c3dac833fdd56d711..fabc09a58d7f95e8650785415d5cd5a9f78d4e3e
@@@ -805,12 -805,25 +805,12 @@@ void tcp_update_metrics(struct sock *sk
        }
  }
  
 -/* Numbers are taken from RFC3390.
 - *
 - * John Heffner states:
 - *
 - *    The RFC specifies a window of no more than 4380 bytes
 - *    unless 2*MSS > 4380.  Reading the pseudocode in the RFC
 - *    is a bit misleading because they use a clamp at 4380 bytes
 - *    rather than use a multiplier in the relevant range.
 - */
  __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
  {
        __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
  
 -      if (!cwnd) {
 -              if (tp->mss_cache > 1460)
 -                      cwnd = 2;
 -              else
 -                      cwnd = (tp->mss_cache > 1095) ? 3 : 4;
 -      }
 +      if (!cwnd)
 +              cwnd = rfc3390_bytes_to_packets(tp->mss_cache);
        return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
  }
  
@@@ -2301,7 -2314,7 +2301,7 @@@ static inline int tcp_dupack_heuristics
  
  static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
  {
 -      return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto);
 +      return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto;
  }
  
  static inline int tcp_head_timedout(struct sock *sk)
@@@ -3398,8 -3411,8 +3398,8 @@@ static void tcp_ack_probe(struct sock *
  
  static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag)
  {
 -      return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
 -              inet_csk(sk)->icsk_ca_state != TCP_CA_Open);
 +      return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
 +              inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
  }
  
  static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
@@@ -3416,9 -3429,9 +3416,9 @@@ static inline int tcp_may_update_window
                                        const u32 ack, const u32 ack_seq,
                                        const u32 nwin)
  {
 -      return (after(ack, tp->snd_una) ||
 +      return  after(ack, tp->snd_una) ||
                after(ack_seq, tp->snd_wl1) ||
 -              (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd));
 +              (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd);
  }
  
  /* Update our send window.
@@@ -4035,6 -4048,8 +4035,8 @@@ static void tcp_reset(struct sock *sk
        default:
                sk->sk_err = ECONNRESET;
        }
+       /* This barrier is coupled with smp_rmb() in tcp_poll() */
+       smp_wmb();
  
        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_error_report(sk);
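
The smp_wmb()/smp_rmb() pair guarantees that a poller observing the wakeup also observes sk_err; without it, tcp_poll() could miss POLLERR on a connection that was just reset. A C11 analogue of the ordering argument using release/acquire (a sketch, not the kernel primitives):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int sk_err;              /* plain data */
static atomic_int wakeup;       /* flag the poller watches */

static void *resetter(void *arg)
{
        sk_err = 104;           /* ECONNRESET */
        /* release store: sk_err is visible before the flag */
        atomic_store_explicit(&wakeup, 1, memory_order_release);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, resetter, NULL);
        /* acquire load: seeing the flag implies seeing sk_err */
        while (!atomic_load_explicit(&wakeup, memory_order_acquire))
                ;
        printf("sk_err = %d\n", sk_err);        /* guaranteed 104 */
        pthread_join(t, NULL);
        return 0;
}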
diff --combined net/ipv6/addrconf.c
index 89aa54394a08aff72ca124651703b2ce689e694a,324fac3b6c16db0238d1139649038bcaa9aaced2..8c88340278f564b8910c094e6e1131609f6bf5a7
@@@ -243,7 -243,7 +243,7 @@@ static inline bool addrconf_qdisc_ok(co
  /* Check if a route is valid prefix route */
  static inline int addrconf_is_prefix_route(const struct rt6_info *rt)
  {
 -      return ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0);
 +      return (rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0;
  }
  
  static void addrconf_del_timer(struct inet6_ifaddr *ifp)
@@@ -2964,8 -2964,7 +2964,8 @@@ static void addrconf_dad_completed(stru
           start sending router solicitations.
         */
  
 -      if (ifp->idev->cnf.forwarding == 0 &&
 +      if ((ifp->idev->cnf.forwarding == 0 ||
 +           ifp->idev->cnf.forwarding == 2) &&
            ifp->idev->cnf.rtr_solicits > 0 &&
            (dev->flags&IFF_LOOPBACK) == 0 &&
            (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) {
@@@ -4638,10 -4637,12 +4638,12 @@@ int __init addrconf_init(void
        if (err < 0) {
                printk(KERN_CRIT "IPv6 Addrconf:"
                       " cannot initialize default policy table: %d.\n", err);
-               return err;
+               goto out;
        }
  
-       register_pernet_subsys(&addrconf_ops);
+       err = register_pernet_subsys(&addrconf_ops);
+       if (err < 0)
+               goto out_addrlabel;
  
        /* The addrconf netdev notifier requires that loopback_dev
         * has it's ipv6 private information allocated and setup
@@@ -4693,7 -4694,9 +4695,9 @@@ errout
        unregister_netdevice_notifier(&ipv6_dev_notf);
  errlo:
        unregister_pernet_subsys(&addrconf_ops);
+ out_addrlabel:
+       ipv6_addr_label_cleanup();
+ out:
        return err;
  }
  
@@@ -4704,6 -4707,7 +4708,7 @@@ void addrconf_cleanup(void
  
        unregister_netdevice_notifier(&ipv6_dev_notf);
        unregister_pernet_subsys(&addrconf_ops);
+       ipv6_addr_label_cleanup();
  
        rtnl_lock();
  
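
addrconf_init() previously ignored the register_pernet_subsys() return value and had no way to unwind the address-label table on failure; the new out_addrlabel/out labels tear down in reverse order. The canonical shape of such a ladder (an illustrative sketch):

#include <stdio.h>

static int step_a(void) { return 0; }
static void undo_a(void) { puts("undo a"); }
static int step_b(void) { return -1; }  /* simulate a failure */

static int init(void)
{
        int err;

        err = step_a();
        if (err)
                goto out;
        err = step_b();
        if (err)
                goto out_a;     /* unwind everything done so far */
        return 0;
out_a:
        undo_a();
out:
        return err;
}

int main(void)
{
        printf("init: %d\n", init());   /* -1, after "undo a" */
        return 0;
}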
diff --combined net/ipv6/addrlabel.c
index 921dcf6c271ad3af9c4dc1bffa88239ac1c5568e,8175f802651bec4481080823a73f60a21d3a2f36..c8993e5a337c25dd2dbe85f182a137dd82386c5d
@@@ -393,6 -393,11 +393,11 @@@ int __init ipv6_addr_label_init(void
        return register_pernet_subsys(&ipv6_addr_label_ops);
  }
  
+ void ipv6_addr_label_cleanup(void)
+ {
+       unregister_pernet_subsys(&ipv6_addr_label_ops);
+ }
  static const struct nla_policy ifal_policy[IFAL_MAX+1] = {
        [IFAL_ADDRESS]          = { .len = sizeof(struct in6_addr), },
        [IFAL_LABEL]            = { .len = sizeof(u32), },
@@@ -513,9 -518,10 +518,9 @@@ static int ip6addrlbl_dump(struct sk_bu
  
  static inline int ip6addrlbl_msgsize(void)
  {
 -      return (NLMSG_ALIGN(sizeof(struct ifaddrlblmsg))
 +      return NLMSG_ALIGN(sizeof(struct ifaddrlblmsg))
                + nla_total_size(16)    /* IFAL_ADDRESS */
 -              + nla_total_size(4)     /* IFAL_LABEL */
 -      );
 +              + nla_total_size(4);    /* IFAL_LABEL */
  }
  
  static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
diff --combined net/ipv6/ip6_output.c
index efbbbce68f9e2914c51bb2b7d3e2e1d52dae44a4,980912ed7a388bd2404b8cb934b2b61e67e12a6c..99157b4cd56e2fa619939a17aa5b716680fc8799
@@@ -637,9 -637,9 +637,9 @@@ static int ip6_fragment(struct sk_buff 
        }
        mtu -= hlen + sizeof(struct frag_hdr);
  
 -      if (skb_has_frags(skb)) {
 +      if (skb_has_frag_list(skb)) {
                int first_len = skb_pagelen(skb);
-               int truesizes = 0;
+               struct sk_buff *frag2;
  
                if (first_len - hlen > mtu ||
                    ((first_len - hlen) & 7) ||
                        if (frag->len > mtu ||
                            ((frag->len & 7) && frag->next) ||
                            skb_headroom(frag) < hlen)
-                           goto slow_path;
+                               goto slow_path_clean;
  
                        /* Partially cloned skb? */
                        if (skb_shared(frag))
-                               goto slow_path;
+                               goto slow_path_clean;
  
                        BUG_ON(frag->sk);
                        if (skb->sk) {
                                frag->sk = skb->sk;
                                frag->destructor = sock_wfree;
-                               truesizes += frag->truesize;
                        }
+                       skb->truesize -= frag->truesize;
                }
  
                err = 0;
  
                first_len = skb_pagelen(skb);
                skb->data_len = first_len - skb_headlen(skb);
-               skb->truesize -= truesizes;
                skb->len = first_len;
                ipv6_hdr(skb)->payload_len = htons(first_len -
                                                   sizeof(struct ipv6hdr));
                              IPSTATS_MIB_FRAGFAILS);
                dst_release(&rt->dst);
                return err;
+ slow_path_clean:
+               skb_walk_frags(skb, frag2) {
+                       if (frag2 == frag)
+                               break;
+                       frag2->sk = NULL;
+                       frag2->destructor = NULL;
+                       skb->truesize += frag2->truesize;
+               }
        }
  
  slow_path:
@@@ -870,8 -878,8 +878,8 @@@ static inline int ip6_rt_check(struct r
                               struct in6_addr *fl_addr,
                               struct in6_addr *addr_cache)
  {
 -      return ((rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
 -              (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache)));
 +      return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
 +              (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
  }
  
  static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
diff --combined net/rds/tcp_connect.c
index a65ee78db0c54e1062186b9b132fe46fac380c91,c519939e8da98fd3ae8252355a0ebb9efac4acd5..af95c8e058fc0d45096234aa8aeb7c8da6fda175
@@@ -43,9 -43,9 +43,9 @@@ void rds_tcp_state_change(struct sock *
        struct rds_connection *conn;
        struct rds_tcp_connection *tc;
  
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
 -      if (conn == NULL) {
 +      if (!conn) {
                state_change = sk->sk_state_change;
                goto out;
        }
@@@ -68,7 -68,7 +68,7 @@@
                        break;
        }
  out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
        state_change(sk);
  }
  
diff --combined net/rds/tcp_listen.c
index ae27869dfc2137a342918f558082c1e58e1d4e88,27844f231d103a4e49e542d670eaca66d01e761d..8b5cc4aa8868702cc743a87c96a100e58809865f
@@@ -114,9 -114,9 +114,9 @@@ void rds_tcp_listen_data_ready(struct s
  
        rdsdebug("listen data ready sk %p\n", sk);
  
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        ready = sk->sk_user_data;
 -      if (ready == NULL) { /* check for teardown race */
 +      if (!ready) { /* check for teardown race */
                ready = sk->sk_data_ready;
                goto out;
        }
                queue_work(rds_wq, &rds_tcp_listen_work);
  
  out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
        ready(sk, bytes);
  }
  
 -int __init rds_tcp_listen_init(void)
 +int rds_tcp_listen_init(void)
  {
        struct sockaddr_in sin;
        struct socket *sock = NULL;
@@@ -178,7 -178,7 +178,7 @@@ void rds_tcp_listen_stop(void
        struct socket *sock = rds_tcp_listen_sock;
        struct sock *sk;
  
 -      if (sock == NULL)
 +      if (!sock)
                return;
  
        sk = sock->sk;
diff --combined net/rds/tcp_recv.c
index 7017f3af80b671cca7b403d99e8006444eb1c8bb,e43797404102efcc2ed45117456e839b5425adb1..67263fbee623a467bb543d70c1f0fd2646137157
@@@ -39,7 -39,7 +39,7 @@@
  
  static struct kmem_cache *rds_tcp_incoming_slab;
  
 -void rds_tcp_inc_purge(struct rds_incoming *inc)
 +static void rds_tcp_inc_purge(struct rds_incoming *inc)
  {
        struct rds_tcp_incoming *tinc;
        tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
@@@ -190,10 -190,10 +190,10 @@@ static int rds_tcp_data_recv(read_descr
         * processing.
         */
        while (left) {
 -              if (tinc == NULL) {
 +              if (!tinc) {
                        tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
                                                arg->gfp);
 -                      if (tinc == NULL) {
 +                      if (!tinc) {
                                desc->error = -ENOMEM;
                                goto out;
                        }
  
                if (left && tc->t_tinc_data_rem) {
                        clone = skb_clone(skb, arg->gfp);
 -                      if (clone == NULL) {
 +                      if (!clone) {
                                desc->error = -ENOMEM;
                                goto out;
                        }
@@@ -324,9 -324,9 +324,9 @@@ void rds_tcp_data_ready(struct sock *sk
  
        rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
  
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
 -      if (conn == NULL) { /* check for teardown race */
 +      if (!conn) { /* check for teardown race */
                ready = sk->sk_data_ready;
                goto out;
        }
        if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM)
                queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
  out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
        ready(sk, bytes);
  }
  
 -int __init rds_tcp_recv_init(void)
 +int rds_tcp_recv_init(void)
  {
        rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming",
                                        sizeof(struct rds_tcp_incoming),
                                        0, 0, NULL);
 -      if (rds_tcp_incoming_slab == NULL)
 +      if (!rds_tcp_incoming_slab)
                return -ENOMEM;
        return 0;
  }
diff --combined net/rds/tcp_send.c
index 2979fb4a4b9aaf8df3b0216d69cac828885df398,2f012a07d94d16d3aa01d4c78d0894e939dfd751..aa16841afbdf9c3597d466f46ac3f49185861d88
@@@ -76,6 -76,56 +76,6 @@@ int rds_tcp_sendmsg(struct socket *sock
        return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len);
  }
  
 -/* the core send_sem serializes this with other xmit and shutdown */
 -int rds_tcp_xmit_cong_map(struct rds_connection *conn,
 -                        struct rds_cong_map *map, unsigned long offset)
 -{
 -      static struct rds_header rds_tcp_map_header = {
 -              .h_flags = RDS_FLAG_CONG_BITMAP,
 -      };
 -      struct rds_tcp_connection *tc = conn->c_transport_data;
 -      unsigned long i;
 -      int ret;
 -      int copied = 0;
 -
 -      /* Some problem claims cpu_to_be32(constant) isn't a constant. */
 -      rds_tcp_map_header.h_len = cpu_to_be32(RDS_CONG_MAP_BYTES);
 -
 -      if (offset < sizeof(struct rds_header)) {
 -              ret = rds_tcp_sendmsg(tc->t_sock,
 -                                    (void *)&rds_tcp_map_header + offset,
 -                                    sizeof(struct rds_header) - offset);
 -              if (ret <= 0)
 -                      return ret;
 -              offset += ret;
 -              copied = ret;
 -              if (offset < sizeof(struct rds_header))
 -                      return ret;
 -      }
 -
 -      offset -= sizeof(struct rds_header);
 -      i = offset / PAGE_SIZE;
 -      offset = offset % PAGE_SIZE;
 -      BUG_ON(i >= RDS_CONG_MAP_PAGES);
 -
 -      do {
 -              ret = tc->t_sock->ops->sendpage(tc->t_sock,
 -                                      virt_to_page(map->m_page_addrs[i]),
 -                                      offset, PAGE_SIZE - offset,
 -                                      MSG_DONTWAIT);
 -              if (ret <= 0)
 -                      break;
 -              copied += ret;
 -              offset += ret;
 -              if (offset == PAGE_SIZE) {
 -                      offset = 0;
 -                      i++;
 -              }
 -      } while (i < RDS_CONG_MAP_PAGES);
 -
 -        return copied ? copied : ret;
 -}
 -
  /* the core send_sem serializes this with other xmit and shutdown */
  int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
                 unsigned int hdr_off, unsigned int sg, unsigned int off)
                        goto out;
        }
  
 -      while (sg < rm->m_nents) {
 +      while (sg < rm->data.op_nents) {
                ret = tc->t_sock->ops->sendpage(tc->t_sock,
 -                                              sg_page(&rm->m_sg[sg]),
 -                                              rm->m_sg[sg].offset + off,
 -                                              rm->m_sg[sg].length - off,
 +                                              sg_page(&rm->data.op_sg[sg]),
 +                                              rm->data.op_sg[sg].offset + off,
 +                                              rm->data.op_sg[sg].length - off,
                                                MSG_DONTWAIT|MSG_NOSIGNAL);
 -              rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->m_sg[sg]),
 -                       rm->m_sg[sg].offset + off, rm->m_sg[sg].length - off,
 +              rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.op_sg[sg]),
 +                       rm->data.op_sg[sg].offset + off, rm->data.op_sg[sg].length - off,
                         ret);
                if (ret <= 0)
                        break;
  
                off += ret;
                done += ret;
 -              if (off == rm->m_sg[sg].length) {
 +              if (off == rm->data.op_sg[sg].length) {
                        off = 0;
                        sg++;
                }
@@@ -174,9 -224,9 +174,9 @@@ void rds_tcp_write_space(struct sock *s
        struct rds_connection *conn;
        struct rds_tcp_connection *tc;
  
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
 -      if (conn == NULL) {
 +      if (!conn) {
                write_space = sk->sk_write_space;
                goto out;
        }
                queue_delayed_work(rds_wq, &conn->c_send_w, 0);
  
  out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
  
        /*
         * write_space is only called when data leaves tcp's send queue if
diff --combined net/sctp/output.c
index 901764b17aeedb1c77a1a8c4b2eb615670004fc5,bcc4590ccaf21bb988a7827614f71a39ffa31318..60600d337a3a9c6d4a589918dee8f18edf5f6961
@@@ -41,8 -41,6 +41,8 @@@
   * be incorporated into the next SCTP release.
   */
  
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
  #include <linux/types.h>
  #include <linux/kernel.h>
  #include <linux/wait.h>
@@@ -94,7 -92,6 +94,6 @@@ struct sctp_packet *sctp_packet_config(
        SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__,
                          packet, vtag);
  
-       sctp_packet_reset(packet);
        packet->vtag = vtag;
  
        if (ecn_capable && sctp_packet_empty(packet)) {