Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
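This blobdiff covers the ath5k TX-path rework that came in through that merge: per-queue length accounting and flow control, a factored-out TX status reporting helper, a periodic TX completion watchdog that resets the chip when a queue gets stuck, and use of all four hardware data queues with WME parameter configuration via conf_tx.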
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 4a07fb89021e37642c273f4b93e3c58533ad240c..95072db0ec21086b00f88092727691c90ac16b0d 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -673,6 +673,10 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
                        PCI_DMA_TODEVICE);
 
        rate = ieee80211_get_tx_rate(sc->hw, info);
+       if (!rate) {
+               ret = -EINVAL;
+               goto err_unmap;
+       }
 
        if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                flags |= AR5K_TXDESC_NOACK;
@@ -733,6 +737,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
 
        spin_lock_bh(&txq->lock);
        list_add_tail(&bf->list, &txq->q);
+       txq->txq_len++;
        if (txq->link == NULL) /* is this first packet? */
                ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
        else /* no, so only link it */
@@ -848,9 +853,11 @@ ath5k_txq_setup(struct ath5k_softc *sc,
        struct ath5k_txq *txq;
        struct ath5k_txq_info qi = {
                .tqi_subtype = subtype,
-               .tqi_aifs = AR5K_TXQ_USEDEFAULT,
-               .tqi_cw_min = AR5K_TXQ_USEDEFAULT,
-               .tqi_cw_max = AR5K_TXQ_USEDEFAULT
+               /* XXX: default values not correct for B and XR channels,
+                * but who cares? */
+               .tqi_aifs = AR5K_TUNE_AIFS,
+               .tqi_cw_min = AR5K_TUNE_CWMIN,
+               .tqi_cw_max = AR5K_TUNE_CWMAX
        };
        int qnum;
 
@@ -889,6 +896,9 @@ ath5k_txq_setup(struct ath5k_softc *sc,
                INIT_LIST_HEAD(&txq->q);
                spin_lock_init(&txq->lock);
                txq->setup = true;
+               txq->txq_len = 0;
+               txq->txq_poll_mark = false;
+               txq->txq_stuck = 0;
        }
        return &sc->txqs[qnum];
 }
@@ -897,9 +907,11 @@ static int
 ath5k_beaconq_setup(struct ath5k_hw *ah)
 {
        struct ath5k_txq_info qi = {
-               .tqi_aifs = AR5K_TXQ_USEDEFAULT,
-               .tqi_cw_min = AR5K_TXQ_USEDEFAULT,
-               .tqi_cw_max = AR5K_TXQ_USEDEFAULT,
+               /* XXX: default values not correct for B and XR channels,
+                * but who cares? */
+               .tqi_aifs = AR5K_TUNE_AIFS,
+               .tqi_cw_min = AR5K_TUNE_CWMIN,
+               .tqi_cw_max = AR5K_TUNE_CWMAX,
                /* NB: for dynamic turbo, don't enable any other interrupts */
                .tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
        };
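The XXX comments above concern PHY-dependent contention defaults: the fixed
AR5K_TUNE_* values match the OFDM (802.11a/g) EDCA defaults, whereas DSSS
(802.11b) nominally uses a larger minimum contention window (aCWmin of 31
rather than 15). Since ath5k_conf_tx below lets mac80211 reprogram these per
queue anyway, the fixed values only matter until the first queue
configuration. For reference (values assumed from the 802.11 defaults, not
taken from the ath5k headers):

        /* OFDM (11a/g) vs. DSSS (11b) contention defaults:
         *   cw_min: 15 (OFDM) vs. 31 (DSSS)
         *   cw_max: 1023 for both
         *   aifs:   2 slots is the usual value for data queues
         */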
@@ -933,7 +945,7 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
                 */
                qi.tqi_aifs = 0;
                qi.tqi_cw_min = 0;
-               qi.tqi_cw_max = 2 * ah->ah_cw_min;
+               qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
        }
 
        ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
@@ -983,9 +995,11 @@ ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
                spin_lock_bh(&sc->txbuflock);
                list_move_tail(&bf->list, &sc->txbuf);
                sc->txbuf_len++;
+               txq->txq_len--;
                spin_unlock_bh(&sc->txbuflock);
        }
        txq->link = NULL;
+       txq->txq_poll_mark = false;
        spin_unlock_bh(&txq->lock);
 }
 
@@ -1479,11 +1493,14 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
                goto drop_packet;
        }
 
+       if (txq->txq_len >= ATH5K_TXQ_LEN_MAX)
+               ieee80211_stop_queue(hw, txq->qnum);
+
        spin_lock_irqsave(&sc->txbuflock, flags);
        if (list_empty(&sc->txbuf)) {
                ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
                spin_unlock_irqrestore(&sc->txbuflock, flags);
-               ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
+               ieee80211_stop_queues(hw);
                goto drop_packet;
        }
        bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
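Together with the txq_len accounting added to ath5k_txbuf_setup and
ath5k_txq_drainq, these two hunks implement per-queue flow control: a
mac80211 queue is stopped once its hardware queue holds ATH5K_TXQ_LEN_MAX
pending frames, and woken again by the completion path once it has drained
below ATH5K_TXQ_LEN_LOW; exhausting the driver-global txbuf pool, by
contrast, now stops all queues. A minimal sketch of the stop/wake
hysteresis, with assumed watermark values (the real constants live in the
ath5k headers):

        #define ATH5K_TXQ_LEN_MAX       50      /* stop the queue here */
        #define ATH5K_TXQ_LEN_LOW       25      /* wake it once below this */

        /* enqueue path (ath5k_tx_queue) */
        if (txq->txq_len >= ATH5K_TXQ_LEN_MAX)
                ieee80211_stop_queue(hw, txq->qnum);

        /* completion path (ath5k_tx_processq) */
        if (txq->txq_len < ATH5K_TXQ_LEN_LOW)
                ieee80211_wake_queue(sc->hw, txq->qnum);

The gap between the two watermarks keeps a queue from being woken and
immediately stopped again.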
@@ -1510,6 +1527,61 @@ drop_packet:
        return NETDEV_TX_OK;
 }
 
+static void
+ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
+                        struct ath5k_tx_status *ts)
+{
+       struct ieee80211_tx_info *info;
+       int i;
+
+       sc->stats.tx_all_count++;
+       info = IEEE80211_SKB_CB(skb);
+
+       ieee80211_tx_info_clear_status(info);
+       for (i = 0; i < 4; i++) {
+               struct ieee80211_tx_rate *r =
+                       &info->status.rates[i];
+
+               if (ts->ts_rate[i]) {
+                       r->idx = ath5k_hw_to_driver_rix(sc, ts->ts_rate[i]);
+                       r->count = ts->ts_retry[i];
+               } else {
+                       r->idx = -1;
+                       r->count = 0;
+               }
+       }
+
+       /* count the successful attempt as well */
+       info->status.rates[ts->ts_final_idx].count++;
+
+       if (unlikely(ts->ts_status)) {
+               sc->stats.ack_fail++;
+               if (ts->ts_status & AR5K_TXERR_FILT) {
+                       info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+                       sc->stats.txerr_filt++;
+               }
+               if (ts->ts_status & AR5K_TXERR_XRETRY)
+                       sc->stats.txerr_retry++;
+               if (ts->ts_status & AR5K_TXERR_FIFO)
+                       sc->stats.txerr_fifo++;
+       } else {
+               info->flags |= IEEE80211_TX_STAT_ACK;
+               info->status.ack_signal = ts->ts_rssi;
+       }
+
+       /*
+        * Remove MAC header padding before giving the frame
+        * back to mac80211.
+        */
+       ath5k_remove_padding(skb);
+
+       if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
+               sc->stats.antenna_tx[ts->ts_antenna]++;
+       else
+               sc->stats.antenna_tx[0]++; /* invalid */
+
+       ieee80211_tx_status(sc->hw, skb);
+}
 
 static void
 ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
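ath5k_tx_frame_completed is the status-reporting half of the old
ath5k_tx_processq loop factored out unchanged, so the rewritten loop below
can report frames and recycle descriptors independently. For context, the
hardware returns up to four multi-rate-retry (rate, retry-count) pairs, and
mac80211 expects them in info->status.rates with unused slots terminated by
idx = -1 and the final rate's count including the successful attempt itself.
An illustrative example of that contract (all values hypothetical):

        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        ieee80211_tx_info_clear_status(info);
        /* two failed tries at one rate, then success at the next */
        info->status.rates[0].idx = 3;          /* driver rate index */
        info->status.rates[0].count = 2;
        info->status.rates[1].idx = 2;
        info->status.rates[1].count = 1;        /* counts the success too */
        info->status.rates[2].idx = -1;         /* terminates the chain */
        info->flags |= IEEE80211_TX_STAT_ACK;
        ieee80211_tx_status(sc->hw, skb);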
@@ -1518,96 +1590,51 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
        struct ath5k_buf *bf, *bf0;
        struct ath5k_desc *ds;
        struct sk_buff *skb;
-       struct ieee80211_tx_info *info;
-       int i, ret;
+       int ret;
 
        spin_lock(&txq->lock);
        list_for_each_entry_safe(bf, bf0, &txq->q, list) {
-               ds = bf->desc;
-
-               /*
-                * It's possible that the hardware can say the buffer is
-                * completed when it hasn't yet loaded the ds_link from
-                * host memory and moved on.  If there are more TX
-                * descriptors in the queue, wait for TXDP to change
-                * before processing this one.
-                */
-               if (ath5k_hw_get_txdp(sc->ah, txq->qnum) == bf->daddr &&
-                   !list_is_last(&bf->list, &txq->q))
-                       break;
-
-               ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
-               if (unlikely(ret == -EINPROGRESS))
-                       break;
-               else if (unlikely(ret)) {
-                       ATH5K_ERR(sc, "error %d while processing queue %u\n",
-                               ret, txq->qnum);
-                       break;
-               }
-
-               sc->stats.tx_all_count++;
-               skb = bf->skb;
-               info = IEEE80211_SKB_CB(skb);
-               bf->skb = NULL;
 
-               pci_unmap_single(sc->pdev, bf->skbaddr, skb->len,
-                               PCI_DMA_TODEVICE);
+               txq->txq_poll_mark = false;
 
-               ieee80211_tx_info_clear_status(info);
-               for (i = 0; i < 4; i++) {
-                       struct ieee80211_tx_rate *r =
-                               &info->status.rates[i];
+               /* skb might already have been processed last time. */
+               if (bf->skb != NULL) {
+                       ds = bf->desc;
 
-                       if (ts.ts_rate[i]) {
-                               r->idx = ath5k_hw_to_driver_rix(sc, ts.ts_rate[i]);
-                               r->count = ts.ts_retry[i];
-                       } else {
-                               r->idx = -1;
-                               r->count = 0;
+                       ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
+                       if (unlikely(ret == -EINPROGRESS))
+                               break;
+                       else if (unlikely(ret)) {
+                               ATH5K_ERR(sc,
+                                       "error %d while processing queue %u\n",
+                                       ret, txq->qnum);
+                               break;
                        }
-               }
-
-               /* count the successful attempt as well */
-               info->status.rates[ts.ts_final_idx].count++;
 
-               if (unlikely(ts.ts_status)) {
-                       sc->stats.ack_fail++;
-                       if (ts.ts_status & AR5K_TXERR_FILT) {
-                               info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
-                               sc->stats.txerr_filt++;
-                       }
-                       if (ts.ts_status & AR5K_TXERR_XRETRY)
-                               sc->stats.txerr_retry++;
-                       if (ts.ts_status & AR5K_TXERR_FIFO)
-                               sc->stats.txerr_fifo++;
-               } else {
-                       info->flags |= IEEE80211_TX_STAT_ACK;
-                       info->status.ack_signal = ts.ts_rssi;
+                       skb = bf->skb;
+                       bf->skb = NULL;
+                       pci_unmap_single(sc->pdev, bf->skbaddr, skb->len,
+                                       PCI_DMA_TODEVICE);
+                       ath5k_tx_frame_completed(sc, skb, &ts);
                }
 
                /*
-                * Remove MAC header padding before giving the frame
-                * back to mac80211.
+                * It's possible that the hardware can say the buffer is
+                * completed when it hasn't yet loaded the ds_link from
+                * host memory and moved on.
+                * Always keep the last descriptor to avoid HW races...
                 */
-               ath5k_remove_padding(skb);
-
-               if (ts.ts_antenna > 0 && ts.ts_antenna < 5)
-                       sc->stats.antenna_tx[ts.ts_antenna]++;
-               else
-                       sc->stats.antenna_tx[0]++; /* invalid */
-
-               ieee80211_tx_status(sc->hw, skb);
-
-               spin_lock(&sc->txbuflock);
-               list_move_tail(&bf->list, &sc->txbuf);
-               sc->txbuf_len++;
-               spin_unlock(&sc->txbuflock);
+               if (ath5k_hw_get_txdp(sc->ah, txq->qnum) != bf->daddr) {
+                       spin_lock(&sc->txbuflock);
+                       list_move_tail(&bf->list, &sc->txbuf);
+                       sc->txbuf_len++;
+                       txq->txq_len--;
+                       spin_unlock(&sc->txbuflock);
+               }
        }
-       if (likely(list_empty(&txq->q)))
-               txq->link = NULL;
        spin_unlock(&txq->lock);
-       if (sc->txbuf_len > ATH_TXBUF / 5)
-               ieee80211_wake_queues(sc->hw);
+       if (txq->txq_len < ATH5K_TXQ_LEN_LOW)
+               ieee80211_wake_queue(sc->hw, txq->qnum);
 }
 
 static void
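The rewritten loop splits completion into two independent steps: a buffer's
TX status is processed (and its skb handed back to mac80211) as soon as the
hardware reports it done, but the buffer is only returned to the free list
once the queue's TXDP register no longer points at its descriptor. Keeping
the last descriptor queued closes the race the old comment described, where
hardware reports completion before it has fetched ds_link and moved on; it
is also why the loop tolerates bf->skb == NULL, meaning the frame was
already reported on an earlier pass and the buffer merely awaits recycling.
The per-buffer states, as a sketch:

        /*
         * bf->skb != NULL, status done  -> report to mac80211, skb = NULL
         * bf->skb == NULL               -> reported on an earlier pass
         * TXDP != bf->daddr             -> hardware moved on: recycle bf
         * TXDP == bf->daddr             -> keep bf queued for the next pass
         */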
@@ -2131,14 +2158,13 @@ ath5k_tasklet_calibrate(unsigned long data)
                                sc->curchan->center_freq));
 
        /* Noise floor calibration interrupts rx/tx path while I/Q calibration
-        * doesn't. We stop the queues so that calibration doesn't interfere
-        * with TX and don't run it as often */
+        * doesn't.
+        * TODO: We should stop TX here, so that it doesn't interfere.
+        * Note that stopping the queues is not enough to stop TX! */
        if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
                ah->ah_cal_next_nf = jiffies +
                        msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
-               ieee80211_stop_queues(sc->hw);
                ath5k_hw_update_noise_floor(ah);
-               ieee80211_wake_queues(sc->hw);
        }
 
        ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
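The removed stop/wake pair is what the new TODO refers to:
ieee80211_stop_queues() only keeps mac80211 from handing the driver new
frames, while anything already queued on the hardware is still transmitted
by the NIC, so stopping the mac80211 queues never actually protected the
noise floor calibration from in-flight TX.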
@@ -2157,6 +2183,47 @@ ath5k_tasklet_ani(unsigned long data)
 }
 
 
+static void
+ath5k_tx_complete_poll_work(struct work_struct *work)
+{
+       struct ath5k_softc *sc = container_of(work, struct ath5k_softc,
+                       tx_complete_work.work);
+       struct ath5k_txq *txq;
+       int i;
+       bool needreset = false;
+
+       for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
+               if (sc->txqs[i].setup) {
+                       txq = &sc->txqs[i];
+                       spin_lock_bh(&txq->lock);
+                       if (txq->txq_len > 1) {
+                               if (txq->txq_poll_mark) {
+                                       ATH5K_DBG(sc, ATH5K_DEBUG_XMIT,
+                                                 "TX queue stuck %d\n",
+                                                 txq->qnum);
+                                       needreset = true;
+                                       txq->txq_stuck++;
+                                       spin_unlock_bh(&txq->lock);
+                                       break;
+                               } else {
+                                       txq->txq_poll_mark = true;
+                               }
+                       }
+                       spin_unlock_bh(&txq->lock);
+               }
+       }
+
+       if (needreset) {
+               ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+                         "TX queues stuck, resetting\n");
+               ath5k_reset(sc, sc->curchan);
+       }
+
+       ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
+               msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
+}
+
+
 /*************************\
 * Initialization routines *
 \*************************/
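ath5k_tx_complete_poll_work, scheduled every ATH5K_TX_COMPLETE_POLL_INT
milliseconds from ath5k_init and cancelled in ath5k_stop_hw, is a two-pass
watchdog: the completion path clears txq_poll_mark whenever it makes
progress, the poll sets the mark on any queue with more than one pending
frame, and a mark still set one interval later means nothing completed in
between, so the queue is counted as stuck and the chip is reset. A minimal,
driver-agnostic sketch of the pattern (hypothetical names throughout):

        struct watchdog {
                bool mark;      /* set by the poll, cleared on progress */
        };

        /* call whenever forward progress is observed */
        static void wd_progress(struct watchdog *w)
        {
                w->mark = false;
        }

        /* periodic check; true means a full interval without progress */
        static bool wd_is_stuck(struct watchdog *w, bool busy)
        {
                if (busy && w->mark)
                        return true;
                w->mark = busy; /* arm (or disarm) for the next pass */
                return false;
        }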
@@ -2248,6 +2315,10 @@ ath5k_init(struct ath5k_softc *sc)
 done:
        mmiowb();
        mutex_unlock(&sc->lock);
+
+       ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
+                       msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
+
        return ret;
 }
 
@@ -2306,6 +2377,8 @@ ath5k_stop_hw(struct ath5k_softc *sc)
 
        stop_tasklets(sc);
 
+       cancel_delayed_work_sync(&sc->tx_complete_work);
+
        ath5k_rfkill_hw_stop(sc->ah);
 
        return ret;
@@ -2391,6 +2464,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
        struct ath5k_softc *sc = hw->priv;
        struct ath5k_hw *ah = sc->ah;
        struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
+       struct ath5k_txq *txq;
        u8 mac[ETH_ALEN] = {};
        int ret;
 
@@ -2456,12 +2530,33 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
                goto err_bhal;
        }
 
-       sc->txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
-       if (IS_ERR(sc->txq)) {
+       /* This order matches mac80211's queue priority, so we can
+        * directly use the mac80211 queue number without any mapping */
+       txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
+       if (IS_ERR(txq)) {
+               ATH5K_ERR(sc, "can't setup xmit queue\n");
+               ret = PTR_ERR(txq);
+               goto err_queues;
+       }
+       txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
+       if (IS_ERR(txq)) {
+               ATH5K_ERR(sc, "can't setup xmit queue\n");
+               ret = PTR_ERR(txq);
+               goto err_queues;
+       }
+       txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
+       if (IS_ERR(txq)) {
+               ATH5K_ERR(sc, "can't setup xmit queue\n");
+               ret = PTR_ERR(txq);
+               goto err_queues;
+       }
+       txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
+       if (IS_ERR(txq)) {
                ATH5K_ERR(sc, "can't setup xmit queue\n");
-               ret = PTR_ERR(sc->txq);
+               ret = PTR_ERR(txq);
                goto err_queues;
        }
+       hw->queues = 4;
 
        tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc);
        tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc);
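The creation order above is deliberate: mac80211 numbers its queues by
priority, so with hw->queues = 4 the stack's queue mapping can index
sc->txqs directly (see ath5k_tx below). The assumed correspondence, in
creation order:

        /* mac80211 queue -> WME access category
         *       0           AR5K_WME_AC_VO (voice, highest priority)
         *       1           AR5K_WME_AC_VI (video)
         *       2           AR5K_WME_AC_BE (best effort)
         *       3           AR5K_WME_AC_BK (background, lowest priority)
         */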
@@ -2470,6 +2565,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
        tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);
 
        INIT_WORK(&sc->reset_work, ath5k_reset_work);
+       INIT_DELAYED_WORK(&sc->tx_complete_work, ath5k_tx_complete_poll_work);
 
        ret = ath5k_eeprom_read_mac(ah, mac);
        if (ret) {
@@ -2554,8 +2650,14 @@ static int
 ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct ath5k_softc *sc = hw->priv;
+       u16 qnum = skb_get_queue_mapping(skb);
+
+       if (WARN_ON(qnum >= sc->ah->ah_capabilities.cap_queues.q_tx_num)) {
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
+       }
 
-       return ath5k_tx_queue(hw, skb, sc->txq);
+       return ath5k_tx_queue(hw, skb, &sc->txqs[qnum]);
 }
 
 static int ath5k_start(struct ieee80211_hw *hw)
@@ -3042,6 +3144,44 @@ static void ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
        mutex_unlock(&sc->lock);
 }
 
+static int ath5k_conf_tx(struct ieee80211_hw *hw, u16 queue,
+                        const struct ieee80211_tx_queue_params *params)
+{
+       struct ath5k_softc *sc = hw->priv;
+       struct ath5k_hw *ah = sc->ah;
+       struct ath5k_txq_info qi;
+       int ret = 0;
+
+       if (queue >= ah->ah_capabilities.cap_queues.q_tx_num)
+               return 0;
+
+       mutex_lock(&sc->lock);
+
+       ath5k_hw_get_tx_queueprops(ah, queue, &qi);
+
+       qi.tqi_aifs = params->aifs;
+       qi.tqi_cw_min = params->cw_min;
+       qi.tqi_cw_max = params->cw_max;
+       qi.tqi_burst_time = params->txop;
+
+       ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
+                 "Configure tx [queue %d],  "
+                 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
+                 queue, params->aifs, params->cw_min,
+                 params->cw_max, params->txop);
+
+       if (ath5k_hw_set_tx_queueprops(ah, queue, &qi)) {
+               ATH5K_ERR(sc,
+                         "Unable to update hardware queue %u!\n", queue);
+               ret = -EIO;
+       } else
+               ath5k_hw_reset_tx_queue(ah, queue);
+
+       mutex_unlock(&sc->lock);
+
+       return ret;
+}
+
 static const struct ieee80211_ops ath5k_hw_ops = {
        .tx             = ath5k_tx,
        .start          = ath5k_start,
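With conf_tx wired up, mac80211 pushes per-AC EDCA parameters (from the
AP's WMM IE, or the 802.11 defaults) into the hardware queues, replacing
the fixed AR5K_TUNE_* values set at queue creation. As an illustration,
the standard best-effort parameters would arrive roughly as (values from
the 802.11 defaults, not from this driver):

        struct ieee80211_tx_queue_params params = {
                .aifs   = 3,
                .cw_min = 15,
                .cw_max = 1023,
                .txop   = 0,    /* no TXOP bursting for best effort */
        };
        /* mac80211 would then call ath5k_conf_tx(hw, 2, &params) */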
@@ -3054,7 +3194,7 @@ static const struct ieee80211_ops ath5k_hw_ops = {
        .set_key        = ath5k_set_key,
        .get_stats      = ath5k_get_stats,
        .get_survey     = ath5k_get_survey,
-       .conf_tx        = NULL,
+       .conf_tx        = ath5k_conf_tx,
        .get_tsf        = ath5k_get_tsf,
        .set_tsf        = ath5k_set_tsf,
        .reset_tsf      = ath5k_reset_tsf,