bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
author		David S. Miller <davem@davemloft.net>
		Fri, 11 Jun 2010 20:32:31 +0000 (13:32 -0700)
committer	David S. Miller <davem@davemloft.net>
		Fri, 11 Jun 2010 20:32:31 +0000 (13:32 -0700)
drivers/net/8139cp.c
drivers/net/8139too.c
drivers/net/gianfar.c
drivers/net/r8169.c
drivers/net/usb/asix.c
drivers/net/wimax/i2400m/fw.c
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
include/linux/skbuff.h
net/core/dev.c
net/ipv4/ipmr.c
net/mac80211/mlme.c

diff --combined drivers/net/8139cp.c
index e949ba80127d66d469641fdc7f4d2ec0055742e1,284a5f4a63ac8a48c955125c64e67ae8b89e16f9..4a4f6b81e32de9ae8c85f9dd6aa379927d0ffaa0
@@@ -322,7 -322,7 +322,7 @@@ struct cp_dma_stats 
        __le32                  rx_ok_mcast;
        __le16                  tx_abort;
        __le16                  tx_underrun;
 -} __attribute__((packed));
 +} __packed;
  
  struct cp_extra_stats {
        unsigned long           rx_frags;
@@@ -598,8 -598,8 +598,8 @@@ rx_next
                        goto rx_status_loop;
  
                spin_lock_irqsave(&cp->lock, flags);
-               cpw16_f(IntrMask, cp_intr_mask);
                __napi_complete(napi);
+               cpw16_f(IntrMask, cp_intr_mask);
                spin_unlock_irqrestore(&cp->lock, flags);
        }
  
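Editor's note: this 8139cp hunk (and the matching 8139too hunk below) moves
__napi_complete() ahead of re-enabling the interrupt mask. A minimal sketch of
the corrected ordering, using hypothetical example_* names rather than the
driver's real ones:

struct example_priv {				/* hypothetical device state */
	struct napi_struct napi;
	spinlock_t lock;
};

static int example_rx(struct example_priv *priv, int budget);	/* hypothetical */
static void example_unmask_irqs(struct example_priv *priv);	/* hypothetical */

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work_done = example_rx(priv, budget);
	unsigned long flags;

	if (work_done < budget) {
		spin_lock_irqsave(&priv->lock, flags);
		/*
		 * Clear NAPI_STATE_SCHED before unmasking: an IRQ arriving
		 * after the unmask can then napi_schedule() a fresh poll.
		 * In the old order that IRQ's napi_schedule() was a no-op
		 * (SCHED still set), and the event could be lost once
		 * __napi_complete() cleared the flag.
		 */
		__napi_complete(napi);
		example_unmask_irqs(priv);
		spin_unlock_irqrestore(&priv->lock, flags);
	}
	return work_done;
}
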
diff --combined drivers/net/8139too.c
index cc7d46238801867b8ee5a16b4811b4eb9ca663b2,97d8068b372b228d6764055ad1d0529439c940af..f5166dccd8dfa6b0436eb6397b20e7f05adee58e
@@@ -662,7 -662,7 +662,7 @@@ static const struct ethtool_ops rtl8139
  /* read MMIO register */
  #define RTL_R8(reg)           ioread8 (ioaddr + (reg))
  #define RTL_R16(reg)          ioread16 (ioaddr + (reg))
 -#define RTL_R32(reg)          ((unsigned long) ioread32 (ioaddr + (reg)))
 +#define RTL_R32(reg)          ioread32 (ioaddr + (reg))
  
  
  static const u16 rtl8139_intr_mask =
@@@ -860,8 -860,9 +860,9 @@@ retry
                }
  
        /* if unknown chip, assume array element #0, original RTL-8139 in this case */
+       i = 0;
        dev_dbg(&pdev->dev, "unknown chip version, assuming RTL-8139\n");
 -      dev_dbg(&pdev->dev, "TxConfig = 0x%lx\n", RTL_R32 (TxConfig));
 +      dev_dbg(&pdev->dev, "TxConfig = 0x%x\n", RTL_R32 (TxConfig));
        tp->chipset = 0;
  
  match:
@@@ -1642,7 -1643,7 +1643,7 @@@ static void rtl8139_tx_timeout_task (st
        netdev_dbg(dev, "Tx queue start entry %ld  dirty entry %ld\n",
                   tp->cur_tx, tp->dirty_tx);
        for (i = 0; i < NUM_TX_DESC; i++)
 -              netdev_dbg(dev, "Tx descriptor %d is %08lx%s\n",
 +              netdev_dbg(dev, "Tx descriptor %d is %08x%s\n",
                           i, RTL_R32(TxStatus0 + (i * 4)),
                           i == tp->dirty_tx % NUM_TX_DESC ?
                           " (queue head)" : "");
@@@ -2088,8 -2089,8 +2089,8 @@@ static int rtl8139_poll(struct napi_str
                 * again when we think we are done.
                 */
                spin_lock_irqsave(&tp->lock, flags);
-               RTL_W16_F(IntrMask, rtl8139_intr_mask);
                __napi_complete(napi);
+               RTL_W16_F(IntrMask, rtl8139_intr_mask);
                spin_unlock_irqrestore(&tp->lock, flags);
        }
        spin_unlock(&tp->rx_lock);
@@@ -2486,7 -2487,7 +2487,7 @@@ static void __set_rx_mode (struct net_d
        int rx_mode;
        u32 tmp;
  
 -      netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08lx\n",
 +      netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08x\n",
                   dev->flags, RTL_R32(RxConfig));
  
        /* Note: do not reorder, GCC is clever about common statements. */
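Editor's note: with the (unsigned long) cast dropped from RTL_R32(), every
debug format string switches from the %lx family to %x, as the hunks above
show; a mismatched specifier draws a -Wformat warning and is undefined
behaviour on LP64 hosts. A standalone, illustrative-only demo in plain C,
not driver code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reg = 0xdeadbeef;	/* the type ioread32() hands back */
	unsigned long wide = reg;	/* what the old cast produced */

	printf("u32:           0x%08x\n", reg);		/* new RTL_R32 users */
	printf("unsigned long: 0x%08lx\n", wide);	/* only valid with the cast */
	return 0;
}
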
diff --combined drivers/net/gianfar.c
index ab54821f6709d3d14492df761bd95025d67f2aa2,46c69cd0655358a74c726d9b0ae56fd1d2c7d0fd..8a17bf096ff653eb81b6b5575d8c200ad4f6dddc
@@@ -681,8 -681,8 +681,8 @@@ static int gfar_of_init(struct of_devic
                priv->rx_queue[i] = NULL;
  
        for (i = 0; i < priv->num_tx_queues; i++) {
 -              priv->tx_queue[i] =  (struct gfar_priv_tx_q *)kzalloc(
 -                              sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
 +              priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
 +                                          GFP_KERNEL);
                if (!priv->tx_queue[i]) {
                        err = -ENOMEM;
                        goto tx_alloc_failed;
        }
  
        for (i = 0; i < priv->num_rx_queues; i++) {
 -              priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc(
 -                                      sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
 +              priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
 +                                          GFP_KERNEL);
                if (!priv->rx_queue[i]) {
                        err = -ENOMEM;
                        goto rx_alloc_failed;
                        FSL_GIANFAR_DEV_HAS_CSUM |
                        FSL_GIANFAR_DEV_HAS_VLAN |
                        FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
-                       FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
-                       FSL_GIANFAR_DEV_HAS_TIMER;
+                       FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
  
        ctype = of_get_property(np, "phy-connection-type", NULL);
  
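Editor's note: the gianfar hunks above drop the explicit casts on kzalloc().
kzalloc() returns void *, which converts implicitly to any object pointer
type, so the casts were redundant noise (checkpatch.pl flags them). A minimal
sketch of the preferred idiom, with a hypothetical stand-in type:

struct example_queue {		/* hypothetical stand-in for gfar_priv_tx_q */
	int size;
};

static struct example_queue *example_alloc_queue(void)
{
	/* sizeof(*q) keeps the allocation correct if the type ever changes */
	struct example_queue *q = kzalloc(sizeof(*q), GFP_KERNEL);

	if (!q)
		return NULL;	/* caller maps this to -ENOMEM */
	return q;
}
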
diff --combined drivers/net/r8169.c
index 6949504589dbb1ecb48bfbb230ef652e7b0342f0,96b6cfbf0a3a682b14216fa23a9e737a48973aaa..239d7efdd4504921794a92b20a8bde8071e8c507
@@@ -88,7 -88,7 +88,7 @@@ static const int multicast_filter_limi
  #define RTL_W32(reg, val32)   writel ((val32), ioaddr + (reg))
  #define RTL_R8(reg)           readb (ioaddr + (reg))
  #define RTL_R16(reg)          readw (ioaddr + (reg))
 -#define RTL_R32(reg)          ((unsigned long) readl (ioaddr + (reg)))
 +#define RTL_R32(reg)          readl (ioaddr + (reg))
  
  enum mac_version {
        RTL_GIGA_MAC_NONE   = 0x00,
@@@ -560,10 -560,10 +560,10 @@@ static void mdio_write(void __iomem *io
                udelay(25);
        }
        /*
-        * Some configurations require a small delay even after the write
-        * completed indication or the next write might fail.
+        * According to hardware specs a 20us delay is required after write
+        * complete indication, but before sending next command.
         */
-       udelay(25);
+       udelay(20);
  }
  
  static int mdio_read(void __iomem *ioaddr, int reg_addr)
                }
                udelay(25);
        }
+       /*
+        * According to hardware specs a 20us delay is required after read
+        * complete indication, but before sending next command.
+        */
+       udelay(20);
        return value;
  }
  
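Editor's note: the r8169 hunks above add a 20us settle delay after both MDIO
read and write completion. An illustrative-only sketch of the access pattern;
the register offset and bit layout here are invented, not the RTL8169's real
PHYAR:

static void example_mdio_write(void __iomem *ioaddr, int reg, int val)
{
	int i;

	/* start the transaction; busy flag is bit 31 in this sketch */
	writel(0x80000000 | ((reg & 0x1f) << 16) | (val & 0xffff), ioaddr);

	for (i = 20; i > 0; i--) {
		if (!(readl(ioaddr) & 0x80000000))
			break;			/* device reports completion */
		udelay(25);
	}
	/* per the comment in the hunk: quiet time before the next command */
	udelay(20);
}
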
diff --combined drivers/net/usb/asix.c
index 7e797ed0439adb1745d2611244bfc1efa6478106,9516f382a6baf76dfdafe63afdd3b22380a2595f..aea4645be7f68956472e6f6fbefeadb2e049f060
@@@ -179,7 -179,7 +179,7 @@@ struct ax88172_int_data 
        __le16 res2;
        u8 status;
        __le16 res3;
 -} __attribute__ ((packed));
 +} __packed;
  
  static int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
                            u16 size, void *data)
@@@ -344,7 -344,7 +344,7 @@@ static int asix_rx_fixup(struct usbnet 
                        return 2;
                }
  
-               if (size > ETH_FRAME_LEN) {
+               if (size > dev->net->mtu + ETH_HLEN) {
                        netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
                                   size);
                        return 0;
diff --combined drivers/net/wimax/i2400m/fw.c
index e9b34b0cb197bfac62fb876dd95bbdbbeeea544f,11491354e5b5bf3de80d0b91ee13d7a203a0e914..8b55a5b14152adee9c966f0d6705dddfad769ffb
@@@ -651,7 -651,7 +651,7 @@@ static int i2400m_download_chunk(struc
        struct {
                struct i2400m_bootrom_header cmd;
                u8 cmd_payload[chunk_len];
 -      } __attribute__((packed)) *buf;
 +      } __packed *buf;
        struct i2400m_bootrom_header ack;
  
        d_fnstart(5, dev, "(i2400m %p chunk %p __chunk_len %zu addr 0x%08lx "
@@@ -794,7 -794,7 +794,7 @@@ int i2400m_dnload_finalize(struct i2400
        struct {
                struct i2400m_bootrom_header cmd;
                u8 cmd_pl[0];
 -      } __attribute__((packed)) *cmd_buf;
 +      } __packed *cmd_buf;
        size_t signature_block_offset, signature_block_size;
  
        d_fnstart(3, dev, "offset %zu\n", offset);
@@@ -1029,7 -1029,7 +1029,7 @@@ int i2400m_read_mac_addr(struct i2400m 
        struct {
                struct i2400m_bootrom_header ack;
                u8 ack_pl[16];
 -      } __attribute__((packed)) ack_buf;
 +      } __packed ack_buf;
  
        d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
        cmd = i2400m->bm_cmd_buf;
@@@ -1115,7 -1115,7 +1115,7 @@@ int i2400m_dnload_init_signed(struct i2
        struct {
                struct i2400m_bootrom_header cmd;
                struct i2400m_bcf_hdr cmd_pl;
 -      } __attribute__((packed)) *cmd_buf;
 +      } __packed *cmd_buf;
        struct i2400m_bootrom_header ack;
  
        d_fnstart(5, dev, "(i2400m %p bcf_hdr %p)\n", i2400m, bcf_hdr);
@@@ -1192,7 -1192,7 +1192,7 @@@ int i2400m_fw_hdr_check(struct i2400m *
        unsigned module_type, header_len, major_version, minor_version,
                module_id, module_vendor, date, size;
  
-       module_type = bcf_hdr->module_type;
+       module_type = le32_to_cpu(bcf_hdr->module_type);
        header_len = sizeof(u32) * le32_to_cpu(bcf_hdr->header_len);
        major_version = (le32_to_cpu(bcf_hdr->header_version) & 0xffff0000)
                >> 16;
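Editor's note: the one-line fix above routes bcf_hdr->module_type through
le32_to_cpu() like every neighbouring field; without it the value decoded
correctly only on little-endian hosts. A standalone, illustrative-only demo
of the conversion in plain C:

#include <stdint.h>
#include <stdio.h>

/* host-order load of a little-endian 32-bit field, byte-order independent */
static uint32_t demo_le32_to_cpu(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	const uint8_t module_type[4] = { 0x06, 0x00, 0x00, 0x00 };

	printf("module_type = %u\n", demo_le32_to_cpu(module_type));	/* 6 */
	return 0;
}
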
diff --combined drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 10a0acdb9dd42cf9611c83d70eb405c601a707b5,a732f1094e5d3a983a102d7afe3038eddc2eb6a2..f9134ceb69ab9d603ee5e86f58abe5615be745fb
@@@ -469,8 -469,7 +469,8 @@@ static void iwlagn_tx_cmd_build_rate(st
        }
  
        /* Set up antennas */
 -      priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
 +      priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
 +                                            priv->hw_params.valid_tx_ant);
        rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
  
        /* Set the rate in the TX cmd */
@@@ -568,7 -567,10 +568,7 @@@ int iwlagn_tx_skb(struct iwl_priv *priv
        hdr_len = ieee80211_hdrlen(fc);
  
        /* Find index into station table for destination station */
 -      if (!info->control.sta)
 -              sta_id = priv->hw_params.bcast_sta_id;
 -      else
 -              sta_id = iwl_sta_id(info->control.sta);
 +      sta_id = iwl_sta_id_or_broadcast(priv, info->control.sta);
        if (sta_id == IWL_INVALID_STATION) {
                IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
                               hdr->addr1);
        }
  
        txq_id = get_queue_from_ac(skb_get_queue_mapping(skb));
 +
 +      /* irqs already disabled/saved above when locking priv->lock */
 +      spin_lock(&priv->sta_lock);
 +
        if (ieee80211_is_data_qos(fc)) {
                qc = ieee80211_get_qos_ctl(hdr);
                tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
 -              if (unlikely(tid >= MAX_TID_COUNT))
 +              if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
 +                      spin_unlock(&priv->sta_lock);
                        goto drop_unlock;
 +              }
                seq_number = priv->stations[sta_id].tid[tid].seq_number;
                seq_number &= IEEE80211_SCTL_SEQ;
                hdr->seq_ctrl = hdr->seq_ctrl &
        swq_id = txq->swq_id;
        q = &txq->q;
  
 -      if (unlikely(iwl_queue_space(q) < q->high_mark))
 +      if (unlikely(iwl_queue_space(q) < q->high_mark)) {
 +              spin_unlock(&priv->sta_lock);
                goto drop_unlock;
 +      }
  
 -      if (ieee80211_is_data_qos(fc))
 +      if (ieee80211_is_data_qos(fc)) {
                priv->stations[sta_id].tid[tid].tfds_in_queue++;
 +              if (!ieee80211_has_morefrags(fc))
 +                      priv->stations[sta_id].tid[tid].seq_number = seq_number;
 +      }
 +
 +      spin_unlock(&priv->sta_lock);
  
        /* Set up driver data for this TFD */
        memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
 -      txq->txb[q->write_ptr].skb[0] = skb;
 +      txq->txb[q->write_ptr].skb = skb;
  
        /* Set up first empty entry in queue's array of Tx/cmd buffers */
        out_cmd = txq->cmd[q->write_ptr];
        txcmd_phys = pci_map_single(priv->pci_dev,
                                    &out_cmd->hdr, len,
                                    PCI_DMA_BIDIRECTIONAL);
 -      pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
 -      pci_unmap_len_set(out_meta, len, len);
 +      dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
 +      dma_unmap_len_set(out_meta, len, len);
        /* Add buffer containing Tx command and MAC(!) header to TFD's
         * first entry */
        priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
  
        if (!ieee80211_has_morefrags(hdr->frame_control)) {
                txq->need_update = 1;
 -              if (qc)
 -                      priv->stations[sta_id].tid[tid].seq_number = seq_number;
        } else {
                wait_write_ptr = 1;
                txq->need_update = 0;
@@@ -1018,8 -1009,6 +1018,8 @@@ int iwlagn_tx_agg_start(struct iwl_pri
        if (ret)
                return ret;
  
 +      spin_lock_irqsave(&priv->sta_lock, flags);
 +      tid_data = &priv->stations[sta_id].tid[tid];
        if (tid_data->tfds_in_queue == 0) {
                IWL_DEBUG_HT(priv, "HW queue is empty\n");
                tid_data->agg.state = IWL_AGG_ON;
                             tid_data->tfds_in_queue);
                tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
        }
 +      spin_unlock_irqrestore(&priv->sta_lock, flags);
        return ret;
  }
  
@@@ -1052,14 -1040,11 +1052,14 @@@ int iwlagn_tx_agg_stop(struct iwl_priv 
                return -ENXIO;
        }
  
 +      spin_lock_irqsave(&priv->sta_lock, flags);
 +
        if (priv->stations[sta_id].tid[tid].agg.state ==
                                IWL_EMPTYING_HW_QUEUE_ADDBA) {
                IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
 +              spin_unlock_irqrestore(&priv->sta_lock, flags);
                return 0;
        }
  
                IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
                priv->stations[sta_id].tid[tid].agg.state =
                                IWL_EMPTYING_HW_QUEUE_DELBA;
 +              spin_unlock_irqrestore(&priv->sta_lock, flags);
                return 0;
        }
  
        IWL_DEBUG_HT(priv, "HW queue is empty\n");
        priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
  
 -      spin_lock_irqsave(&priv->lock, flags);
 +      /* do not restore/save irqs */
 +      spin_unlock(&priv->sta_lock);
 +      spin_lock(&priv->lock);
 +
        /*
         * the only reason this call can fail is queue number out of range,
         * which can happen if uCode is reloaded and all the station
@@@ -1111,8 -1092,6 +1111,8 @@@ int iwlagn_txq_check_empty(struct iwl_p
        u8 *addr = priv->stations[sta_id].sta.sta.addr;
        struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
  
 +      WARN_ON(!spin_is_locked(&priv->sta_lock));
 +
        switch (priv->stations[sta_id].tid[tid].agg.state) {
        case IWL_EMPTYING_HW_QUEUE_DELBA:
                /* We are reclaiming the last packet of the */
                }
                break;
        }
 +
        return 0;
  }
  
@@@ -1147,6 -1125,7 +1147,7 @@@ static void iwlagn_tx_status(struct iwl
        struct ieee80211_sta *sta;
        struct iwl_station_priv *sta_priv;
  
+       rcu_read_lock();
        sta = ieee80211_find_sta(priv->vif, hdr->addr1);
        if (sta) {
                sta_priv = (void *)sta->drv_priv;
                    atomic_dec_return(&sta_priv->pending_frames) == 0)
                        ieee80211_sta_block_awake(priv->hw, sta, false);
        }
+       rcu_read_unlock();
  
        ieee80211_tx_status_irqsafe(priv->hw, skb);
  }
@@@ -1179,12 -1159,12 +1181,12 @@@ int iwlagn_tx_queue_reclaim(struct iwl_
             q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
  
                tx_info = &txq->txb[txq->q.read_ptr];
 -              iwlagn_tx_status(priv, tx_info->skb[0]);
 +              iwlagn_tx_status(priv, tx_info->skb);
  
 -              hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
 +              hdr = (struct ieee80211_hdr *)tx_info->skb->data;
                if (hdr && ieee80211_is_data_qos(hdr->frame_control))
                        nfreed++;
 -              tx_info->skb[0] = NULL;
 +              tx_info->skb = NULL;
  
                if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
                        priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
@@@ -1208,7 -1188,7 +1210,7 @@@ static int iwlagn_tx_status_reply_compr
        int i, sh, ack;
        u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
        u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
 -      u64 bitmap;
 +      u64 bitmap, sent_bitmap;
        int successes = 0;
        struct ieee80211_tx_info *info;
  
  
        /* check for success or failure according to the
         * transmitted bitmap and block-ack bitmap */
 -      bitmap &= agg->bitmap;
 +      sent_bitmap = bitmap & agg->bitmap;
  
        /* For each frame attempted in aggregation,
         * update driver's record of tx frame's status. */
 -      for (i = 0; i < agg->frame_count ; i++) {
 -              ack = bitmap & (1ULL << i);
 -              successes += !!ack;
 +      i = 0;
 +      while (sent_bitmap) {
 +              ack = sent_bitmap & 1ULL;
 +              successes += ack;
                IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
                        ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
                        agg->start_idx + i);
 +              sent_bitmap >>= 1;
 +              ++i;
        }
  
 -      info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
 +      info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
        memset(&info->status, 0, sizeof(info->status));
        info->flags |= IEEE80211_TX_STAT_ACK;
        info->flags |= IEEE80211_TX_STAT_AMPDU;
        info->status.ampdu_ack_len = successes;
 -      info->status.ampdu_ack_map = bitmap;
        info->status.ampdu_len = agg->frame_count;
        iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
  
@@@ -1303,7 -1281,6 +1305,7 @@@ void iwlagn_rx_reply_compressed_ba(stru
        int index;
        int sta_id;
        int tid;
 +      unsigned long flags;
  
        /* "flow" corresponds to Tx queue */
        u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
        /* Find index just before block-ack window */
        index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
  
 -      /* TODO: Need to get this copy more safely - now good for debug */
 +      spin_lock_irqsave(&priv->sta_lock, flags);
  
        IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
                           "sta_id = %d\n",
  
                iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
        }
 +
 +      spin_unlock_irqrestore(&priv->sta_lock, flags);
  }
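Editor's note: the iwlwifi hunks above move station-table state under the new
priv->sta_lock, and iwlagn_tx_agg_stop() hands off from sta_lock to
priv->lock while keeping interrupts disabled throughout. A minimal sketch of
that handoff, with a hypothetical field layout:

struct example_priv {		/* hypothetical: only the two locks matter */
	spinlock_t sta_lock;
	spinlock_t lock;
};

static void example_handoff(struct example_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_lock, flags);
	/* ... update per-station aggregation state ... */

	/* do not restore/save irqs: flags belongs to the outer acquire */
	spin_unlock(&priv->sta_lock);
	spin_lock(&priv->lock);

	/* ... queue-level teardown that needs priv->lock ... */
	spin_unlock_irqrestore(&priv->lock, flags);
}
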
diff --combined include/linux/skbuff.h
index 645e78d395fd6dd9d6f9c53b0fa31c887ef2e06b,f89e7fd59a4c4029a5e301b30c14df0ee7ae2699..122d08396e566e63e9dd77da5ab7144b2558cb70
@@@ -380,7 -380,10 +380,10 @@@ struct sk_buff 
        kmemcheck_bitfield_begin(flags2);
        __u16                   queue_mapping:16;
  #ifdef CONFIG_IPV6_NDISC_NODETYPE
-       __u8                    ndisc_nodetype:2;
+       __u8                    ndisc_nodetype:2,
+                               deliver_no_wcard:1;
+ #else
+       __u8                    deliver_no_wcard:1;
  #endif
        kmemcheck_bitfield_end(flags2);
  
@@@ -2129,8 -2132,7 +2132,8 @@@ static inline bool skb_warn_if_lro(cons
        /* LRO sets gso_size but not gso_type, whereas if GSO is really
         * wanted then gso_type will be set. */
        struct skb_shared_info *shinfo = skb_shinfo(skb);
 -      if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
 +      if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
 +          unlikely(shinfo->gso_type == 0)) {
                __skb_warn_lro_forwarding(skb);
                return true;
        }
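Editor's note: the first skbuff.h hunk adds a deliver_no_wcard bit next to
ndisc_nodetype; the net/core/dev.c hunks below set and honour it. A sketch of
the intended delivery rule, illustrative only (the real check lives in
__netif_receive_skb()):

static bool example_may_deliver(const struct sk_buff *skb,
				const struct net_device *handler_dev)
{
	/* handlers bound to the skb's own device always qualify */
	if (handler_dev == skb->dev)
		return true;

	/* wildcard (any-device) handlers are skipped for flagged skbs */
	return !skb->deliver_no_wcard;
}
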
diff --combined net/core/dev.c
index 6f330cee79a6720bf2ca0ce7c4261dd7651ef9d2,2b3bf53bc687aab3c9cfb32cce902eaebb3c18da..277844901ce3cdceac4db804cd6aec422ed973c6
@@@ -803,31 -803,35 +803,31 @@@ struct net_device *dev_getfirstbyhwtype
  EXPORT_SYMBOL(dev_getfirstbyhwtype);
  
  /**
 - *    dev_get_by_flags - find any device with given flags
 + *    dev_get_by_flags_rcu - find any device with given flags
   *    @net: the applicable net namespace
   *    @if_flags: IFF_* values
   *    @mask: bitmask of bits in if_flags to check
   *
   *    Search for any interface with the given flags. Returns NULL if a device
 - *    is not found or a pointer to the device. The device returned has
 - *    had a reference added and the pointer is safe until the user calls
 - *    dev_put to indicate they have finished with it.
 + *    is not found or a pointer to the device. Must be called inside
 + *    rcu_read_lock(), and result refcount is unchanged.
   */
  
 -struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
 +struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
                                    unsigned short mask)
  {
        struct net_device *dev, *ret;
  
        ret = NULL;
 -      rcu_read_lock();
        for_each_netdev_rcu(net, dev) {
                if (((dev->flags ^ if_flags) & mask) == 0) {
 -                      dev_hold(dev);
                        ret = dev;
                        break;
                }
        }
 -      rcu_read_unlock();
        return ret;
  }
 -EXPORT_SYMBOL(dev_get_by_flags);
 +EXPORT_SYMBOL(dev_get_by_flags_rcu);
  
  /**
   *    dev_valid_name - check if name is okay for network device
@@@ -1573,9 -1577,7 +1573,9 @@@ EXPORT_SYMBOL(__netif_schedule)
  
  void dev_kfree_skb_irq(struct sk_buff *skb)
  {
 -      if (atomic_dec_and_test(&skb->users)) {
 +      if (!skb->destructor)
 +              dev_kfree_skb(skb);
 +      else if (atomic_dec_and_test(&skb->users)) {
                struct softnet_data *sd;
                unsigned long flags;
  
@@@ -2036,24 -2038,14 +2036,24 @@@ static inline int __dev_xmit_skb(struc
                                 struct netdev_queue *txq)
  {
        spinlock_t *root_lock = qdisc_lock(q);
 +      bool contended = qdisc_is_running(q);
        int rc;
  
 +      /*
 +       * Heuristic to force contended enqueues to serialize on a
 +       * separate lock before trying to get qdisc main lock.
 +       * This permits __QDISC_STATE_RUNNING owner to get the lock more often
 +       * and dequeue packets faster.
 +       */
 +      if (unlikely(contended))
 +              spin_lock(&q->busylock);
 +
        spin_lock(root_lock);
        if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
                kfree_skb(skb);
                rc = NET_XMIT_DROP;
        } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
 -                 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
 +                 qdisc_run_begin(q)) {
                /*
                 * This is a work-conserving queue; there are no old skbs
                 * waiting to be sent out; and the qdisc is not running -
                if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
                        skb_dst_force(skb);
                __qdisc_update_bstats(q, skb->len);
 -              if (sch_direct_xmit(skb, q, dev, txq, root_lock))
 +              if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
 +                      if (unlikely(contended)) {
 +                              spin_unlock(&q->busylock);
 +                              contended = false;
 +                      }
                        __qdisc_run(q);
 -              else
 -                      clear_bit(__QDISC_STATE_RUNNING, &q->state);
 +              } else
 +                      qdisc_run_end(q);
  
                rc = NET_XMIT_SUCCESS;
        } else {
                skb_dst_force(skb);
                rc = qdisc_enqueue_root(skb, q);
 -              qdisc_run(q);
 +              if (qdisc_run_begin(q)) {
 +                      if (unlikely(contended)) {
 +                              spin_unlock(&q->busylock);
 +                              contended = false;
 +                      }
 +                      __qdisc_run(q);
 +              }
        }
        spin_unlock(root_lock);
 -
 +      if (unlikely(contended))
 +              spin_unlock(&q->busylock);
        return rc;
  }
  
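Editor's note: the comment inside __dev_xmit_skb() above explains the new
busylock heuristic. Reduced to its core with hypothetical names: contending
CPUs first serialize on a secondary lock, so the qdisc owner can retake the
main lock quickly and keep dequeuing packets.

struct example_qdisc {		/* hypothetical: just the two locks */
	spinlock_t lock;
	spinlock_t busylock;
};

static bool example_is_running(struct example_qdisc *q);	/* hypothetical */
static void example_do_enqueue(struct example_qdisc *q,
			       struct sk_buff *skb);		/* hypothetical */

static void example_enqueue(struct example_qdisc *q, struct sk_buff *skb)
{
	bool contended = example_is_running(q);

	if (contended)
		spin_lock(&q->busylock);	/* waiters pile up here... */

	spin_lock(&q->lock);			/* ...not on the hot lock */
	example_do_enqueue(q, skb);
	spin_unlock(&q->lock);

	if (contended)
		spin_unlock(&q->busylock);
}
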
  static inline int skb_needs_linearize(struct sk_buff *skb,
                                      struct net_device *dev)
  {
 -      return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
 -             (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
 -                                            illegal_highdma(dev, skb)));
 +      return skb_is_nonlinear(skb) &&
 +             ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
 +              (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
 +                                            illegal_highdma(dev, skb))));
  }
  
  /**
@@@ -2273,11 -2253,9 +2273,9 @@@ static int get_rps_cpu(struct net_devic
        if (skb_rx_queue_recorded(skb)) {
                u16 index = skb_get_rx_queue(skb);
                if (unlikely(index >= dev->num_rx_queues)) {
-                       if (net_ratelimit()) {
-                               pr_warning("%s received packet on queue "
-                                       "%u, but number of RX queues is %u\n",
-                                       dev->name, index, dev->num_rx_queues);
-                       }
+                       WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
+                               "on queue %u, but number of RX queues is %u\n",
+                               dev->name, index, dev->num_rx_queues);
                        goto done;
                }
                rxqueue = dev->_rx + index;
@@@ -2601,14 -2579,70 +2599,14 @@@ static inline int deliver_skb(struct sk
        return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
  }
  
 -#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
 -
 -#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
 +#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
 +    (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
  /* This hook is defined here for ATM LANE */
  int (*br_fdb_test_addr_hook)(struct net_device *dev,
                             unsigned char *addr) __read_mostly;
  EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
  #endif
  
 -/*
 - * If bridge module is loaded call bridging hook.
 - *  returns NULL if packet was consumed.
 - */
 -struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
 -                                      struct sk_buff *skb) __read_mostly;
 -EXPORT_SYMBOL_GPL(br_handle_frame_hook);
 -
 -static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
 -                                          struct packet_type **pt_prev, int *ret,
 -                                          struct net_device *orig_dev)
 -{
 -      struct net_bridge_port *port;
 -
 -      if (skb->pkt_type == PACKET_LOOPBACK ||
 -          (port = rcu_dereference(skb->dev->br_port)) == NULL)
 -              return skb;
 -
 -      if (*pt_prev) {
 -              *ret = deliver_skb(skb, *pt_prev, orig_dev);
 -              *pt_prev = NULL;
 -      }
 -
 -      return br_handle_frame_hook(port, skb);
 -}
 -#else
 -#define handle_bridge(skb, pt_prev, ret, orig_dev)    (skb)
 -#endif
 -
 -#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
 -struct sk_buff *(*macvlan_handle_frame_hook)(struct macvlan_port *p,
 -                                           struct sk_buff *skb) __read_mostly;
 -EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
 -
 -static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
 -                                           struct packet_type **pt_prev,
 -                                           int *ret,
 -                                           struct net_device *orig_dev)
 -{
 -      struct macvlan_port *port;
 -
 -      port = rcu_dereference(skb->dev->macvlan_port);
 -      if (!port)
 -              return skb;
 -
 -      if (*pt_prev) {
 -              *ret = deliver_skb(skb, *pt_prev, orig_dev);
 -              *pt_prev = NULL;
 -      }
 -      return macvlan_handle_frame_hook(port, skb);
 -}
 -#else
 -#define handle_macvlan(skb, pt_prev, ret, orig_dev)   (skb)
 -#endif
 -
  #ifdef CONFIG_NET_CLS_ACT
  /* TODO: Maybe we should just force sch_ingress to be compiled in
   * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
@@@ -2659,6 -2693,9 +2657,6 @@@ static inline struct sk_buff *handle_in
        if (*pt_prev) {
                *ret = deliver_skb(skb, *pt_prev, orig_dev);
                *pt_prev = NULL;
 -      } else {
 -              /* Huh? Why does turning on AF_PACKET affect this? */
 -              skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
        }
  
        switch (ing_filter(skb)) {
@@@ -2701,47 -2738,6 +2699,47 @@@ void netif_nit_deliver(struct sk_buff *
        rcu_read_unlock();
  }
  
 +/**
 + *    netdev_rx_handler_register - register receive handler
 + *    @dev: device to register a handler for
 + *    @rx_handler: receive handler to register
 + *
 + *    Register a receive handler for a device. This handler will then be
 + *    called from __netif_receive_skb. A negative errno code is returned
 + *    on a failure.
 + *
 + *    The caller must hold the rtnl_mutex.
 + */
 +int netdev_rx_handler_register(struct net_device *dev,
 +                             rx_handler_func_t *rx_handler)
 +{
 +      ASSERT_RTNL();
 +
 +      if (dev->rx_handler)
 +              return -EBUSY;
 +
 +      rcu_assign_pointer(dev->rx_handler, rx_handler);
 +
 +      return 0;
 +}
 +EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
 +
 +/**
 + *    netdev_rx_handler_unregister - unregister receive handler
 + *    @dev: device to unregister a handler from
 + *
 + *    Unregister a receive handler from a device.
 + *
 + *    The caller must hold the rtnl_mutex.
 + */
 +void netdev_rx_handler_unregister(struct net_device *dev)
 +{
 +
 +      ASSERT_RTNL();
 +      rcu_assign_pointer(dev->rx_handler, NULL);
 +}
 +EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
 +
  static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
                                              struct net_device *master)
  {
@@@ -2794,7 -2790,6 +2792,7 @@@ EXPORT_SYMBOL(__skb_bond_should_drop)
  static int __netif_receive_skb(struct sk_buff *skb)
  {
        struct packet_type *ptype, *pt_prev;
 +      rx_handler_func_t *rx_handler;
        struct net_device *orig_dev;
        struct net_device *master;
        struct net_device *null_or_orig;
        if (!skb->skb_iif)
                skb->skb_iif = skb->dev->ifindex;
  
+       /*
+        * bonding note: skbs received on inactive slaves should only
+        * be delivered to pkt handlers that are exact matches.  Also
+        * the deliver_no_wcard flag will be set.  If packet handlers
+        * are sensitive to duplicate packets these skbs will need to
+        * be dropped at the handler.  The vlan accel path may have
+        * already set the deliver_no_wcard flag.
+        */
        null_or_orig = NULL;
        orig_dev = skb->dev;
        master = ACCESS_ONCE(orig_dev->master);
-       if (master) {
-               if (skb_bond_should_drop(skb, master))
+       if (skb->deliver_no_wcard)
+               null_or_orig = orig_dev;
+       else if (master) {
+               if (skb_bond_should_drop(skb, master)) {
+                       skb->deliver_no_wcard = 1;
                        null_or_orig = orig_dev; /* deliver only exact match */
-               else
+               } else
                        skb->dev = master;
        }
  
 -      __get_cpu_var(softnet_data).processed++;
 -
 +      __this_cpu_inc(softnet_data.processed);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        skb->mac_len = skb->network_header - skb->mac_header;
  ncls:
  #endif
  
 -      skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
 -      if (!skb)
 -              goto out;
 -      skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
 -      if (!skb)
 -              goto out;
 +      /* Handle special case of bridge or macvlan */
 +      rx_handler = rcu_dereference(skb->dev->rx_handler);
 +      if (rx_handler) {
 +              if (pt_prev) {
 +                      ret = deliver_skb(skb, pt_prev, orig_dev);
 +                      pt_prev = NULL;
 +              }
 +              skb = rx_handler(skb);
 +              if (!skb)
 +                      goto out;
 +      }
  
        /*
         * Make sure frames received on VLAN interfaces stacked on
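Editor's note: the dev.c hunks above replace the hardwired bridge/macvlan
hooks with a per-device rx_handler registered via
netdev_rx_handler_register(). A hedged usage sketch with hypothetical
example_* names; per the kernel-doc added above, registration requires RTNL,
and returning NULL from the handler means the skb was consumed:

static bool example_steal_frame(struct sk_buff *skb);	/* hypothetical */

static struct sk_buff *example_handle_frame(struct sk_buff *skb)
{
	if (example_steal_frame(skb))
		return NULL;	/* consumed: __netif_receive_skb() stops here */
	return skb;		/* untouched: normal delivery continues */
}

static int example_attach(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, example_handle_frame);
	rtnl_unlock();
	return err;	/* -EBUSY if the device already has a handler */
}
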
diff --combined net/ipv4/ipmr.c
index 8418afc357ee825eeb2640443eaa795d0cb31fe6,757f25eb9b4b2404ebebc6c4422b4ad1693ea227..539592294f45ac557f0f996b3276f1afb3d9eba7
@@@ -267,8 -267,10 +267,10 @@@ static void __net_exit ipmr_rules_exit(
  {
        struct mr_table *mrt, *next;
  
-       list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list)
+       list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
+               list_del(&mrt->list);
                kfree(mrt);
+       }
        fib_rules_unregister(net->ipv4.mr_rules_ops);
  }
  #else
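Editor's note: the ipmr fix above unlinks each mr_table before freeing it.
The _safe iterator only protects the walk itself; it never unlinks, so
freeing without list_del() leaves the list head pointing at freed memory.
Minimal sketch with a hypothetical entry type:

struct example_entry {
	struct list_head list;
};

static void example_exit(struct list_head *head)
{
	struct example_entry *e, *next;

	list_for_each_entry_safe(e, next, head, list) {
		list_del(&e->list);	/* unlink first... */
		kfree(e);		/* ...then free */
	}
}
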
@@@ -1551,9 -1553,9 +1553,9 @@@ static void ipmr_queue_xmit(struct net 
                        goto out_free;
        }
  
 -      dev = rt->u.dst.dev;
 +      dev = rt->dst.dev;
  
 -      if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
 +      if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
                /* Do not fragment multicasts. Alas, IPv4 does not
                   allow to send ICMP, so that packets will disappear
                   to blackhole.
                goto out_free;
        }
  
 -      encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;
 +      encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;
  
        if (skb_cow(skb, encap)) {
                ip_rt_put(rt);
        vif->bytes_out += skb->len;
  
        skb_dst_drop(skb);
 -      skb_dst_set(skb, &rt->u.dst);
 +      skb_dst_set(skb, &rt->dst);
        ip_decrease_ttl(ip_hdr(skb));
  
        /* FIXME: forward and output firewalls used to be called here.
diff --combined net/mac80211/mlme.c
index 2ab4e86d9929615abfc201cad1b22ba38e29038d,f803f8b72a930dd58b60ae362ea1b7117f1247b2..8fb85c3a043d3e1c8c69c6222d5826dbf8a8e504
@@@ -1760,9 -1760,45 +1760,45 @@@ static void ieee80211_sta_rx_queued_mgm
        mutex_unlock(&ifmgd->mtx);
  
        if (skb->len >= 24 + 2 /* mgmt + deauth reason */ &&
-           (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH)
-               cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
+           (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) {
+               struct ieee80211_local *local = sdata->local;
+               struct ieee80211_work *wk;
+               mutex_lock(&local->work_mtx);
+               list_for_each_entry(wk, &local->work_list, list) {
+                       if (wk->sdata != sdata)
+                               continue;
+                       if (wk->type != IEEE80211_WORK_ASSOC)
+                               continue;
+                       if (memcmp(mgmt->bssid, wk->filter_ta, ETH_ALEN))
+                               continue;
+                       if (memcmp(mgmt->sa, wk->filter_ta, ETH_ALEN))
+                               continue;
+                       /*
+                        * Printing the message only here means we can't
+                        * spuriously print it, but it also means that it
+                        * won't be printed when the frame comes in before
+                        * we even tried to associate or in similar cases.
+                        *
+                        * Ultimately, I suspect cfg80211 should print the
+                        * messages instead.
+                        */
+                       printk(KERN_DEBUG
+                              "%s: deauthenticated from %pM (Reason: %u)\n",
+                              sdata->name, mgmt->bssid,
+                              le16_to_cpu(mgmt->u.deauth.reason_code));
+                       list_del_rcu(&wk->list);
+                       free_work(wk);
+                       break;
+               }
+               mutex_unlock(&local->work_mtx);
  
+               cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
+       }
   out:
        kfree_skb(skb);
  }
@@@ -1801,7 -1837,7 +1837,7 @@@ static void ieee80211_sta_work(struct w
  
        /*
         * ieee80211_queue_work() should have picked up most cases,
 -       * here we'll pick the the rest.
 +       * here we'll pick the rest.
         */
        if (WARN(local->suspended, "STA MLME work scheduled while "
                 "going to suspend\n"))
@@@ -2116,18 -2152,8 +2152,18 @@@ static enum work_done_result ieee80211_
                        cfg80211_send_assoc_timeout(wk->sdata->dev,
                                                    wk->filter_ta);
                        return WORK_DONE_DESTROY;
 +              } else {
 +                      mutex_unlock(&wk->sdata->u.mgd.mtx);
 +#ifdef CONFIG_INET
 +                      /*
 +                       * configure ARP filter IP addresses to the driver,
 +                       * intentionally outside the mgd mutex.
 +                       */
 +                      rtnl_lock();
 +                      ieee80211_set_arp_filter(wk->sdata);
 +                      rtnl_unlock();
 +#endif
                }
 -              mutex_unlock(&wk->sdata->u.mgd.mtx);
        }
  
        cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len);
@@@ -2356,7 -2382,6 +2392,7 @@@ int ieee80211_mgd_disassoc(struct ieee8
  int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata,
                         struct ieee80211_channel *chan,
                         enum nl80211_channel_type channel_type,
 +                       bool channel_type_valid,
                         const u8 *buf, size_t len, u64 *cookie)
  {
        struct ieee80211_local *local = sdata->local;
        struct sk_buff *skb;
  
        /* Check that we are on the requested channel for transmission */
 -      if ((chan != local->tmp_channel ||
 -           channel_type != local->tmp_channel_type) &&
 -          (chan != local->oper_channel ||
 +      if (chan != local->tmp_channel &&
 +          chan != local->oper_channel)
 +              return -EBUSY;
 +      if (channel_type_valid &&
 +          (channel_type != local->tmp_channel_type &&
             channel_type != local->_oper_channel_type))
                return -EBUSY;