/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME          16

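/*
 * Worked example for the macros above (illustrative, derived from the
 * definitions themselves): a long-GI transmission of 10 symbols lasts
 * SYMBOL_TIME(10) = 40 us, while the 3.6 us short-GI symbol gives
 * SYMBOL_TIME_HALFGI(10) = (10 * 18 + 4) / 5 = 36 us. Conversely,
 * NUM_SYMBOLS_PER_USEC(40) = 10; the half-GI inverse rounds down, so
 * NUM_SYMBOLS_PER_USEC_HALFGI(36) = (36*5 - 4)/18 = 9.
 */
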
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

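/*
 * Illustration (not part of the original source): bits_per_symbol[]
 * holds data bits per 4 us OFDM symbol for one spatial stream. MCS 0
 * at HT20 carries 26 bits/symbol, i.e. 26 / 4 us = 6.5 Mbps; MCS 7 at
 * HT40 carries 540 bits/symbol, i.e. 135 Mbps, scaling linearly with
 * the stream count from HT_RC_2_STREAMS().
 */
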
#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864,  19300,  25736,  28952,  32172,
		6424, 12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628, 19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296,  21444,  28596,  32172,  35744,
		7140, 14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

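/*
 * These tables are, approximately, how many bytes fit in 4 ms at each
 * MCS: for instance MCS 0 at HT20/long-GI runs at 6.5 Mbps, and
 * 6.5 Mbps * 4 ms / 8 = 3250 bytes, slightly above the 3212 listed
 * once PLCP/framing overhead is accounted for. Entries saturate at
 * 65532, just under the 16-bit length limit the hardware accepts.
 */
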
/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	spin_lock_bh(&txq->axq_lock);
	tid->paused++;
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	BUG_ON(tid->paused <= 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0)
		goto unlock;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	BUG_ON(tid->paused <= 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0) {
		spin_unlock_bh(&txq->axq_lock);
		return;
	}

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		BUG_ON(bf_isretried(bf));
		list_move_tail(&bf->list, &bf_head);
		ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	tid->tx_buf[cindex] = NULL;

	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	BUG_ON(tid->tx_buf[cindex] != NULL);
	tid->tx_buf[cindex] = bf;

	if (index >= ((tid->baw_tail - tid->baw_head) &
		      (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

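/*
 * Illustration of the bookkeeping above (editorial example): with
 * seq_start = 100, a frame with seqno 103 lands 3 slots past baw_head.
 * tx_buf[] is a circular array (ATH_TID_MAX_BUFS is a power of two,
 * hence the mask), and ath_tx_update_baw() slides seq_start/baw_head
 * forward over slots whose frames have completed.
 */
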
/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;
	tbf->bf_dmacontext = bf->bf_dmacontext;

	return tbf;
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	rcu_read_lock();

	/* XXX: use ieee80211_find_sta! */
	sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
	if (!sta) {
		rcu_read_unlock();
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but the AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable the reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								  bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	if (tid->state & AGGR_CLEANUP) {
		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;

			/* send buffered frames as singles */
			ath_tx_flush_tid(sc, tid);
		}
		rcu_read_unlock();
		return;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit the aggregate size by the minimum rate if the selected rate
	 * is not a probe rate; if the selected rate is a probe rate, then
	 * avoid aggregating this packet at all.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * The hardware can accept aggregates up to 16-bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536, since we are constrained by the hardware.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

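/*
 * Worked example (editorial, values taken from the tables above): if
 * the slowest MCS in the series is MCS 3 at HT20/long-GI, the 4 ms
 * budget is ath_max_4ms_framelen[MCS_HT20][3] = 12864 bytes. Without
 * Bluetooth coexistence that is the aggregate cap; with
 * SC_OP_BT_PRIORITY_DETECTED it is scaled to 3/8 of that (4824 bytes)
 * to leave airtime for BT traffic.
 */
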
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *	  The hardware can keep up at lower rates, but not higher rates
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

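/*
 * Worked example (editorial, assuming the usual 4-byte MPDU delimiter
 * for ATH_AGGR_DELIM_SZ): with an 8 us mpdudensity at MCS 7/HT40/
 * long-GI and one stream, nsymbols = NUM_SYMBOLS_PER_USEC(8) = 2 and
 * nsymbits = 540, so minlen = (2 * 540) / 8 = 135 bytes. A 100-byte
 * subframe then needs mindelim = (135 - 100) / 4 = 8 extra delimiters.
 */
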
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		ath_tx_addto_baw(sc, tid, bf);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}

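/*
 * Note on the accounting above: PADBYTES() rounds each subframe up to
 * a 4-byte boundary (PADBYTES(1537) = 3, PADBYTES(1536) = 0), and each
 * delimiter contributes another 4 bytes (ndelim << 2); both go into
 * bpad so the next subframe's start is charged against aggr_limit.
 */
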
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		       u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);
	txtid->state |= AGGR_ADDBA_PROGRESS;
	ath_tx_pause_tid(sc, txtid);
	*ssn = txtid->seq_start;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
	struct ath_tx_status ts;
	struct ath_buf *bf;
	struct list_head bf_head;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	ath_tx_pause_tid(sc, txtid);

	/* drop all software retried frames and mark this TID */
	spin_lock_bh(&txq->axq_lock);
	while (!list_empty(&txtid->buf_q)) {
		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
		if (!bf_isretried(bf)) {
			/*
			 * NB: this relies on the assumption that a
			 * software-retried frame will always stay
			 * at the head of the software queue.
			 */
			break;
		}
		list_move_tail(&bf->list, &bf_head);
		ath_tx_update_baw(sc, txtid, bf->bf_seqno);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}
	spin_unlock_bh(&txq->axq_lock);

	if (txtid->baw_head != txtid->baw_tail) {
		txtid->state |= AGGR_CLEANUP;
	} else {
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
		ath_tx_flush_tid(sc, txtid);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *txtid;

	if (!(sc->sc_flags & SC_OP_TXAGGR))
		return false;

	txtid = ATH_AN_2_TID(an, tidno);

	if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
		return true;
	return false;
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load, and it only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts, this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}

int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
{
	int qnum;

	switch (qtype) {
	case ATH9K_TX_QUEUE_DATA:
		if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
			ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
				  "HAL AC %u out of range, max %zu!\n",
				  haltype, ARRAY_SIZE(sc->tx.hwq_map));
			return -1;
		}
		qnum = sc->tx.hwq_map[haltype];
		break;
	case ATH9K_TX_QUEUE_BEACON:
		qnum = sc->beacon.beaconq;
		break;
	case ATH9K_TX_QUEUE_CAB:
		qnum = sc->beacon.cabq->axq_qnum;
		break;
	default:
		qnum = -1;
	}
	return qnum;
}

struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_txq *txq = NULL;
	u16 skb_queue = skb_get_queue_mapping(skb);
	int qnum;

	qnum = ath_get_hal_qnum(skb_queue, sc);
	txq = &sc->tx.txq[qnum];

	spin_lock_bh(&txq->axq_lock);

	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_XMIT,
			  "TX queue: %d is full, depth: %d\n",
			  qnum, txq->axq_depth);
		ath_mac80211_stop_queue(sc, skb_queue);
		txq->stopped = 1;
		spin_unlock_bh(&txq->axq_lock);
		return NULL;
	}

	spin_unlock_bh(&txq->axq_lock);

	return txq;
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

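/*
 * Example of the computation above (editorial): cabqReadytime is a
 * percentage of the beacon interval, so with a beacon interval of 100
 * and a cabqReadytime of, say, 10, qi.tqi_readyTime = (100 * 10) / 100
 * = 10, i.e. the CAB queue may drain for 10% of each beacon period,
 * with the percentage itself clamped between ATH9K_READY_TIME_LO_BOUND
 * and ATH9K_READY_TIME_HI_BOUND.
 */
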
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_tx_aborted = true;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend) {
		int r;

		ath_print(common, ATH_DBG_FATAL,
			  "Failed to stop TX DMA. Resetting hardware!\n");

		spin_lock_bh(&sc->sc_resetlock);
		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
		if (r)
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to reset hardware; reset status %d\n",
				  r);
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	if (list_empty(&txq->axq_acq))
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		break;
	} while (!list_empty(&ac->tid_q));

	if (!list_empty(&ac->tid_q)) {
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "HAL AC %u out of range, max %zu!\n",
			  haltype, ARRAY_SIZE(sc->tx.hwq_map));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->tx.hwq_map[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_print(common, ATH_DBG_QUEUE,
		  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_print(common, ATH_DBG_XMIT,
				  "Initializing tx fifo %d which "
				  "is non-empty\n",
				  txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_print(common, ATH_DBG_XMIT,
			  "TXDP[%u] = %llx (%p)\n",
			  txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_print(common, ATH_DBG_XMIT,
				  "TXDP[%u] = %llx (%p)\n",
				  txq->axq_qnum, ito64(bf->bf_daddr),
				  bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_print(common, ATH_DBG_XMIT,
				  "link[%u] (%p)=%llx (%p)\n",
				  txq->axq_qnum, txq->axq_link,
				  ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
}

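/*
 * Note on the two paths above (editorial): EDMA-capable hardware (the
 * AR9003 family) consumes a ring of up to ATH_TXFIFO_DEPTH FIFO slots,
 * so a full FIFO spills onto txq_fifo_pending; legacy hardware instead
 * walks a linked descriptor chain, so an idle queue gets TXDP pointed
 * at the first buffer while a busy one is appended via *txq->axq_link.
 */
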
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct list_head *bf_head,
			      struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued);

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_move_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
}

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);

	bf->bf_lastbf = bf;
	bf->bf_nframes = 1;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static int get_hw_crypto_keytype(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.hw_key) {
		if (tx_info->control.hw_key->alg == ALG_WEP)
			return ATH9K_KEY_TYPE_WEP;
		else if (tx_info->control.hw_key->alg == ALG_TKIP)
			return ATH9K_KEY_TYPE_TKIP;
		else if (tx_info->control.hw_key->alg == ALG_CCMP)
			return ATH9K_KEY_TYPE_AES;
	}

	return ATH9K_KEY_TYPE_CLEAR;
}

static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/*
	 * For HT capable stations, we save tidno for later use.
	 * We also override seqno set by upper layer with the one
	 * in tx aggregation state.
	 */
	tid = ATH_AN_2_TID(an, bf->bf_tidno);
	hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
	bf->bf_seqno = tid->seq_next;
	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}

static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;

	if (use_ldpc)
		flags |= ATH9K_TXDESC_LDPC;

	return flags;
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4 us vs 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams, pktlen;

	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

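/*
 * Worked example (editorial): a 1500-byte MPDU at MCS 7 (single
 * stream, HT20, long GI) gives nbits = 1500*8 + 22 = 12022 and
 * nsymbits = 260, so nsymbols = 47 and the payload lasts
 * SYMBOL_TIME(47) = 188 us; the training/signal fields add
 * 8+8+4+8+4+4 = 36 us, for 224 us total.
 */
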
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_11n_rate_series series[4];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i, flags = 0;
	u8 rix = 0, ctsrate = 0;
	bool is_pspoll;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;
	is_pspoll = ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	ctsrate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		ctsrate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		series[i].Tries = rates[i].count;
		series[i].ChSel = common->tx_chainmask;

		if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
		    (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			series[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			series[i].Rate = rix | 0x80;
			series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
					is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				series[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		series[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				series[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
		flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (flags & ATH9K_TXDESC_RTSENA)
		flags &= ~ATH9K_TXDESC_CTSENA;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
				     !is_pspoll, ctsrate,
				     0, series, 4, flags);

	if (sc->config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
}

static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
                               struct sk_buff *skb,
                               struct ath_tx_control *txctl)
{
    struct ath_wiphy *aphy = hw->priv;
    struct ath_softc *sc = aphy->sc;
    struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
    struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
    int hdrlen;
    __le16 fc;
    int padpos, padsize;
    bool use_ldpc = false;

    tx_info->pad[0] = 0;
    switch (txctl->frame_type) {
    case ATH9K_IFT_NOT_INTERNAL:
        break;
    case ATH9K_IFT_PAUSE:
        tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
        /* fall through */
    case ATH9K_IFT_UNPAUSE:
        tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
        break;
    }
    hdrlen = ieee80211_get_hdrlen_from_skb(skb);
    fc = hdr->frame_control;

    ATH_TXBUF_RESET(bf);

    bf->aphy = aphy;
    bf->bf_frmlen = skb->len + FCS_LEN;

    /* Remove the padding size from bf_frmlen, if any */
    padpos = ath9k_cmn_padpos(hdr->frame_control);
    padsize = padpos & 3;
    if (padsize && skb->len > padpos + padsize)
        bf->bf_frmlen -= padsize;

    if (conf_is_ht(&hw->conf)) {
        bf->bf_state.bf_type |= BUF_HT;
        if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
            use_ldpc = true;
    }

    bf->bf_flags = setup_tx_flags(skb, use_ldpc);

    bf->bf_keytype = get_hw_crypto_keytype(skb);
    if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
        bf->bf_frmlen += tx_info->control.hw_key->icv_len;
        bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
    } else {
        bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
    }

    if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
        (sc->sc_flags & SC_OP_TXAGGR))
        assign_aggr_tid_seqno(skb, bf);

    bf->bf_mpdu = skb;

    bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
                                       skb->len, DMA_TO_DEVICE);
    if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
        bf->bf_mpdu = NULL;
        ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
                  "dma_mapping_error() on TX\n");
        return -ENOMEM;
    }

    bf->bf_buf_addr = bf->bf_dmacontext;

    /* tag if this is a nullfunc frame to enable PS when AP acks it */
    if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
        bf->bf_isnullfunc = true;
        sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
    } else
        bf->bf_isnullfunc = false;

    bf->bf_tx_aborted = false;

    return 0;
}
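
/*
 * Queue the frame for transmission: fill the hardware descriptors,
 * then either hand the buffer to the aggregation code (AMPDU) or
 * send it out directly as a normal frame.
 */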
/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_control *txctl)
{
    struct sk_buff *skb = bf->bf_mpdu;
    struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
    struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
    struct ath_node *an = NULL;
    struct list_head bf_head;
    struct ath_desc *ds;
    struct ath_atx_tid *tid;
    struct ath_hw *ah = sc->sc_ah;
    int frm_type;
    __le16 fc;

    frm_type = get_hw_packet_type(skb);
    fc = hdr->frame_control;

    INIT_LIST_HEAD(&bf_head);
    list_add_tail(&bf->list, &bf_head);

    ds = bf->bf_desc;
    ath9k_hw_set_desc_link(ah, ds, 0);

    ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
                           bf->bf_keyix, bf->bf_keytype, bf->bf_flags);

    ath9k_hw_filltxdesc(ah, ds,
                        skb->len,   /* segment length */
                        true,       /* first segment */
                        true,       /* last segment */
                        ds,         /* first descriptor */
                        bf->bf_buf_addr,
                        txctl->txq->axq_qnum);

    spin_lock_bh(&txctl->txq->axq_lock);

    if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
        tx_info->control.sta) {
        an = (struct ath_node *)tx_info->control.sta->drv_priv;
        tid = ATH_AN_2_TID(an, bf->bf_tidno);

        if (!ieee80211_is_data_qos(fc)) {
            ath_tx_send_normal(sc, txctl->txq, &bf_head);
            goto tx_done;
        }

        if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
            /*
             * Try aggregation if it's a unicast data frame
             * and the destination is HT capable.
             */
            ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
        } else {
            /*
             * Send this frame as regular when ADDBA
             * exchange is neither complete nor pending.
             */
            ath_tx_send_ht_normal(sc, txctl->txq,
                                  tid, &bf_head);
        }
    } else {
        ath_tx_send_normal(sc, txctl->txq, &bf_head);
    }

tx_done:
    spin_unlock_bh(&txctl->txq->axq_lock);
}
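
/*
 * Transmit entry point called from mac80211. A tx buffer is taken
 * from the free pool and set up; if the pool is nearly exhausted,
 * the corresponding mac80211 queue is stopped until ath_tx_processq()
 * completes enough buffers to wake it again.
 */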
/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
                 struct ath_tx_control *txctl)
{
    struct ath_wiphy *aphy = hw->priv;
    struct ath_softc *sc = aphy->sc;
    struct ath_common *common = ath9k_hw_common(sc->sc_ah);
    struct ath_buf *bf;
    int r;

    bf = ath_tx_get_buffer(sc);
    if (!bf) {
        ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
        return -1;
    }

    r = ath_tx_setup_buffer(hw, bf, skb, txctl);
    if (unlikely(r)) {
        struct ath_txq *txq = txctl->txq;

        ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");

        /* Upon ath_tx_processq() this TX queue will be resumed; we
         * guarantee this will happen by knowing beforehand that
         * we will at least have to run TX completion on one buffer
         * on the queue */
        spin_lock_bh(&txq->axq_lock);
        if (sc->tx.txq[txq->axq_qnum].axq_depth > 1) {
            ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
            txq->stopped = 1;
        }
        spin_unlock_bh(&txq->axq_lock);

        ath_tx_return_buffer(sc, bf);

        return r;
    }

    ath_tx_start_dma(sc, bf, txctl);

    return 0;
}
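
/*
 * Transmit a frame on the CAB (content-after-beacon) queue, used for
 * buffered multicast/broadcast traffic sent right after the beacon.
 */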
void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
{
    struct ath_wiphy *aphy = hw->priv;
    struct ath_softc *sc = aphy->sc;
    struct ath_common *common = ath9k_hw_common(sc->sc_ah);
    struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
    int padpos, padsize;
    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
    struct ath_tx_control txctl;

    memset(&txctl, 0, sizeof(struct ath_tx_control));

    /*
     * As a temporary workaround, assign seq# here; this will likely need
     * to be cleaned up to work better with Beacon transmission and virtual
     * BSSes.
     */
    if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
        if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
            sc->tx.seq_no += 0x10;
        hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
        hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
    }

    /* Add the padding after the header if this is not already done */
    padpos = ath9k_cmn_padpos(hdr->frame_control);
    padsize = padpos & 3;
    if (padsize && skb->len > padpos) {
        if (skb_headroom(skb) < padsize) {
            ath_print(common, ATH_DBG_XMIT,
                      "TX CABQ padding failed\n");
            dev_kfree_skb_any(skb);
            return;
        }
        skb_push(skb, padsize);
        memmove(skb->data, skb->data + padsize, padpos);
    }

    txctl.txq = sc->beacon.cabq;

    ath_print(common, ATH_DBG_XMIT,
              "transmitting CABQ packet, skb: %p\n", skb);

    if (ath_tx_start(hw, skb, &txctl) != 0) {
        ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
        goto exit;
    }

    return;
exit:
    dev_kfree_skb_any(skb);
}

/*****************/
/* TX Completion */
/*****************/
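
/*
 * Hand a completed frame back to mac80211: strip the MAC header
 * padding again, set the status flags (ACKed, filtered, BA missing)
 * and, if a TX ACK was being awaited for powersave, resume sleep.
 */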
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                            struct ath_wiphy *aphy, int tx_flags)
{
    struct ieee80211_hw *hw = sc->hw;
    struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
    struct ath_common *common = ath9k_hw_common(sc->sc_ah);
    struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
    int padpos, padsize;

    ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

    if (aphy)
        hw = aphy->hw;

    if (tx_flags & ATH_TX_BAR)
        tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

    if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
        /* Frame was ACKed */
        tx_info->flags |= IEEE80211_TX_STAT_ACK;
    }

    padpos = ath9k_cmn_padpos(hdr->frame_control);
    padsize = padpos & 3;
    if (padsize && skb->len > padpos + padsize) {
        /*
         * Remove MAC header padding before giving the frame back to
         * mac80211.
         */
        memmove(skb->data + padsize, skb->data, padpos);
        skb_pull(skb, padsize);
    }

    if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
        sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
        ath_print(common, ATH_DBG_PS,
                  "Going back to sleep after having "
                  "received TX status (0x%lx)\n",
                  sc->ps_flags & (PS_WAIT_FOR_BEACON |
                                  PS_WAIT_FOR_CAB |
                                  PS_WAIT_FOR_PSPOLL_DATA |
                                  PS_WAIT_FOR_TX_ACK));
    }

    if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
        ath9k_tx_status(hw, skb);
    else
        ieee80211_tx_status(hw, skb);
}
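
/*
 * Unmap the frame, report its status to mac80211 and return the
 * ath_buf list of this mpdu to the free pool.
 */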
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
                                struct ath_tx_status *ts, int txok, int sendbar)
{
    struct sk_buff *skb = bf->bf_mpdu;
    unsigned long flags;
    int tx_flags = 0;

    if (sendbar)
        tx_flags = ATH_TX_BAR;

    if (!txok) {
        tx_flags |= ATH_TX_ERROR;

        if (bf_isxretried(bf))
            tx_flags |= ATH_TX_XRETRY;
    }

    dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
    ath_tx_complete(sc, skb, bf->aphy, tx_flags);
    ath_debug_stat_tx(sc, txq, bf, ts);

    /*
     * Return the list of ath_bufs of this mpdu to the free queue.
     */
    spin_lock_irqsave(&sc->tx.txbuflock, flags);
    list_splice_tail_init(bf_q, &sc->tx.txbuf);
    spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
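
/*
 * Count the subframes of an aggregate that were not ACKed in the
 * block-ack bitmap; for a single frame this simply reflects txok.
 */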
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
                              struct ath_tx_status *ts, int txok)
{
    u16 seq_st = 0;
    u32 ba[WME_BA_BMP_SIZE >> 5];
    int ba_index;
    int nbad = 0;
    int isaggr = 0;

    if (bf->bf_lastbf->bf_tx_aborted)
        return 0;

    isaggr = bf_isaggr(bf);
    if (isaggr) {
        seq_st = ts->ts_seqnum;
        memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
    }

    while (bf) {
        ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
        if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
            nbad++;

        bf = bf->bf_next;
    }

    return nbad;
}
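
/*
 * Translate the hardware tx status into mac80211 rate-control
 * feedback: ACK RSSI, filtered/underrun/retry flags, aggregate
 * lengths and the per-rate try counts.
 */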
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
                             int nbad, int txok, bool update_rc)
{
    struct sk_buff *skb = bf->bf_mpdu;
    struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
    struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
    struct ieee80211_hw *hw = bf->aphy->hw;
    u8 i, tx_rateindex;

    if (txok)
        tx_info->status.ack_signal = ts->ts_rssi;

    tx_rateindex = ts->ts_rateindex;
    WARN_ON(tx_rateindex >= hw->max_rates);

    if (ts->ts_status & ATH9K_TXERR_FILT)
        tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
    if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
        tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

    if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
        (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
        if (ieee80211_is_data(hdr->frame_control)) {
            if (ts->ts_flags &
                (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
                tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
            if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
                (ts->ts_status & ATH9K_TXERR_FIFO))
                tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
            tx_info->status.ampdu_len = bf->bf_nframes;
            tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
        }
    }

    for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
        tx_info->status.rates[i].count = 0;
        tx_info->status.rates[i].idx = -1;
    }

    tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1;
}
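
/*
 * Wake the mac80211 queue that maps to this hardware queue once
 * enough tx buffers have been freed.
 */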
static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
{
    int qnum;

    spin_lock_bh(&txq->axq_lock);
    if (txq->stopped &&
        sc->tx.txq[txq->axq_qnum].axq_depth <= (ATH_TXBUF - 20)) {
        qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
        if (qnum != -1) {
            ath_mac80211_start_queue(sc, qnum);
            txq->stopped = 0;
        }
    }
    spin_unlock_bh(&txq->axq_lock);
}
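
/*
 * Reap completed descriptors from a hardware tx queue: pop finished
 * frames off the queue, update rate-control status and hand them to
 * the aggregation or single-frame completion path.
 */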
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
    struct ath_hw *ah = sc->sc_ah;
    struct ath_common *common = ath9k_hw_common(ah);
    struct ath_buf *bf, *lastbf, *bf_held = NULL;
    struct list_head bf_head;
    struct ath_desc *ds;
    struct ath_tx_status ts;
    int txok;
    int status;

    ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
              txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
              txq->axq_link);

    for (;;) {
        spin_lock_bh(&txq->axq_lock);
        if (list_empty(&txq->axq_q)) {
            txq->axq_link = NULL;
            spin_unlock_bh(&txq->axq_lock);
            break;
        }
        bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

        /*
         * There is a race condition where a BH gets scheduled after
         * software writes TxE and before hardware reloads the last
         * descriptor to get the newly chained one. Software must
         * keep the last DONE descriptor as a holding descriptor -
         * software does so by marking it with the STALE flag.
         */
        bf_held = NULL;
        if (bf->bf_stale) {
            bf_held = bf;
            if (list_is_last(&bf_held->list, &txq->axq_q)) {
                spin_unlock_bh(&txq->axq_lock);
                break;
            } else {
                bf = list_entry(bf_held->list.next,
                                struct ath_buf, list);
            }
        }

        lastbf = bf->bf_lastbf;
        ds = lastbf->bf_desc;

        memset(&ts, 0, sizeof(ts));
        status = ath9k_hw_txprocdesc(ah, ds, &ts);
        if (status == -EINPROGRESS) {
            spin_unlock_bh(&txq->axq_lock);
            break;
        }

        /*
         * We now know the nullfunc frame has been ACKed so we
         * can disable RX.
         */
        if (bf->bf_isnullfunc &&
            (ts.ts_status & ATH9K_TX_ACKED)) {
            if (sc->ps_flags & PS_ENABLED)
                ath9k_enable_ps(sc);
            else
                sc->ps_flags |= PS_NULLFUNC_COMPLETED;
        }

        /*
         * Remove ath_bufs of the same transmit unit from txq,
         * but leave the last descriptor back as the holding
         * descriptor for hw.
         */
        lastbf->bf_stale = true;
        INIT_LIST_HEAD(&bf_head);
        if (!list_is_singular(&lastbf->list))
            list_cut_position(&bf_head,
                              &txq->axq_q, lastbf->list.prev);

        txq->axq_depth--;
        txok = !(ts.ts_status & ATH9K_TXERR_MASK);
        txq->axq_tx_inprogress = false;
        if (bf_held)
            list_del(&bf_held->list);
        spin_unlock_bh(&txq->axq_lock);

        if (bf_held)
            ath_tx_return_buffer(sc, bf_held);

        if (!bf_isampdu(bf)) {
            /*
             * This frame is sent out as a single frame.
             * Use hardware retry status for this frame.
             */
            bf->bf_retries = ts.ts_longretry;
            if (ts.ts_status & ATH9K_TXERR_XRETRY)
                bf->bf_state.bf_type |= BUF_XRETRY;
            ath_tx_rc_status(bf, &ts, 0, txok, true);
        }

        if (bf_isampdu(bf))
            ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
        else
            ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);

        ath_wake_mac80211_queue(sc, txq);

        spin_lock_bh(&txq->axq_lock);
        if (sc->sc_flags & SC_OP_TXAGGR)
            ath_txq_schedule(sc, txq);
        spin_unlock_bh(&txq->axq_lock);
    }
}
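
/*
 * Watchdog for stuck tx queues: if a queue still has the same frames
 * in flight on two consecutive polls, assume the hardware hung and
 * reset the chip.
 */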
static void ath_tx_complete_poll_work(struct work_struct *work)
{
    struct ath_softc *sc = container_of(work, struct ath_softc,
                                        tx_complete_work.work);
    struct ath_txq *txq;
    int i;
    bool needreset = false;

    for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
        if (ATH_TXQ_SETUP(sc, i)) {
            txq = &sc->tx.txq[i];
            spin_lock_bh(&txq->axq_lock);
            if (txq->axq_depth) {
                if (txq->axq_tx_inprogress) {
                    needreset = true;
                    spin_unlock_bh(&txq->axq_lock);
                    break;
                } else {
                    txq->axq_tx_inprogress = true;
                }
            }
            spin_unlock_bh(&txq->axq_lock);
        }

    if (needreset) {
        ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
                  "tx hung, resetting the chip\n");
        ath9k_ps_wakeup(sc);
        ath_reset(sc, false);
        ath9k_ps_restore(sc);
    }

    ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
                                 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}
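
/*
 * Tasklet entry point: process every queue that the interrupt status
 * flagged as having completed frames.
 */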
void ath_tx_tasklet(struct ath_softc *sc)
{
    int i;
    u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

    ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

    for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
        if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
            ath_tx_processq(sc, &sc->tx.txq[i]);
    }
}
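
/*
 * EDMA (AR9003) variant of tx completion: status is read from a
 * dedicated status ring instead of from the descriptors themselves.
 */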
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
    struct ath_tx_status txs;
    struct ath_common *common = ath9k_hw_common(sc->sc_ah);
    struct ath_hw *ah = sc->sc_ah;
    struct ath_txq *txq;
    struct ath_buf *bf, *lastbf;
    struct list_head bf_head;
    int status;
    int txok;

    for (;;) {
        status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
        if (status == -EINPROGRESS)
            break;
        if (status == -EIO) {
            ath_print(common, ATH_DBG_XMIT,
                      "Error processing tx status\n");
            break;
        }

        /* Skip beacon completions */
        if (txs.qid == sc->beacon.beaconq)
            continue;

        txq = &sc->tx.txq[txs.qid];

        spin_lock_bh(&txq->axq_lock);
        if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
            spin_unlock_bh(&txq->axq_lock);
            return;
        }

        bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
                              struct ath_buf, list);
        lastbf = bf->bf_lastbf;

        INIT_LIST_HEAD(&bf_head);
        list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
                          &lastbf->list);
        INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
        txq->axq_depth--;
        txq->axq_tx_inprogress = false;
        spin_unlock_bh(&txq->axq_lock);

        txok = !(txs.ts_status & ATH9K_TXERR_MASK);

        if (!bf_isampdu(bf)) {
            bf->bf_retries = txs.ts_longretry;
            if (txs.ts_status & ATH9K_TXERR_XRETRY)
                bf->bf_state.bf_type |= BUF_XRETRY;
            ath_tx_rc_status(bf, &txs, 0, txok, true);
        }

        if (bf_isampdu(bf))
            ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
        else
            ath_tx_complete_buf(sc, bf, txq, &bf_head,
                                &txs, txok, 0);

        ath_wake_mac80211_queue(sc, txq);

        spin_lock_bh(&txq->axq_lock);
        if (!list_empty(&txq->txq_fifo_pending)) {
            INIT_LIST_HEAD(&bf_head);
            bf = list_first_entry(&txq->txq_fifo_pending,
                                  struct ath_buf, list);
            list_cut_position(&bf_head, &txq->txq_fifo_pending,
                              &bf->bf_lastbf->list);
            ath_tx_txqaddbuf(sc, txq, &bf_head);
        } else if (sc->sc_flags & SC_OP_TXAGGR)
            ath_txq_schedule(sc, txq);
        spin_unlock_bh(&txq->axq_lock);
    }
}

/*****************/
/* Init, Cleanup */
/*****************/
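
/*
 * Allocate the DMA-coherent ring the hardware writes tx status
 * entries into on EDMA (AR9003) chips.
 */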
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
    struct ath_descdma *dd = &sc->txsdma;
    u8 txs_len = sc->sc_ah->caps.txs_len;

    dd->dd_desc_len = size * txs_len;
    dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
                                     &dd->dd_desc_paddr, GFP_KERNEL);
    if (!dd->dd_desc)
        return -ENOMEM;

    return 0;
}
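
/*
 * Set up the tx status ring and point the hardware at it.
 */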
static int ath_tx_edma_init(struct ath_softc *sc)
{
    int err;

    err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
    if (!err)
        ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
                                  sc->txsdma.dd_desc_paddr,
                                  ATH_TXSTATUS_RING_SIZE);

    return err;
}

static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
    struct ath_descdma *dd = &sc->txsdma;

    dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                      dd->dd_desc_paddr);
}
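
/*
 * Allocate tx and beacon descriptor DMA areas, start the completion
 * watchdog and, on EDMA chips, the tx status ring. Any failure here
 * unwinds via ath_tx_cleanup().
 */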
int ath_tx_init(struct ath_softc *sc, int nbufs)
{
    struct ath_common *common = ath9k_hw_common(sc->sc_ah);
    int error = 0;

    spin_lock_init(&sc->tx.txbuflock);

    error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
                              "tx", nbufs, 1, 1);
    if (error != 0) {
        ath_print(common, ATH_DBG_FATAL,
                  "Failed to allocate tx descriptors: %d\n", error);
        goto err;
    }

    error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
                              "beacon", ATH_BCBUF, 1, 1);
    if (error != 0) {
        ath_print(common, ATH_DBG_FATAL,
                  "Failed to allocate beacon descriptors: %d\n", error);
        goto err;
    }

    INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

    if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
        error = ath_tx_edma_init(sc);
        if (error)
            goto err;
    }

err:
    if (error != 0)
        ath_tx_cleanup(sc);

    return error;
}

void ath_tx_cleanup(struct ath_softc *sc)
{
    if (sc->beacon.bdma.dd_desc_len != 0)
        ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

    if (sc->tx.txdma.dd_desc_len != 0)
        ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

    if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
        ath_tx_edma_cleanup(sc);
}
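
/*
 * Per-station init: set up the per-TID aggregation state and map each
 * TID to its WME access category and hardware queue.
 */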
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
    struct ath_atx_tid *tid;
    struct ath_atx_ac *ac;
    int tidno, acno;

    for (tidno = 0, tid = &an->tid[tidno];
         tidno < WME_NUM_TID;
         tidno++, tid++) {
        tid->an = an;
        tid->tidno = tidno;
        tid->seq_start = tid->seq_next = 0;
        tid->baw_size = WME_MAX_BA;
        tid->baw_head = tid->baw_tail = 0;
        tid->sched = false;
        tid->paused = false;
        tid->state &= ~AGGR_CLEANUP;
        INIT_LIST_HEAD(&tid->buf_q);
        acno = TID_TO_WME_AC(tidno);
        tid->ac = &an->ac[acno];
        tid->state &= ~AGGR_ADDBA_COMPLETE;
        tid->state &= ~AGGR_ADDBA_PROGRESS;
    }

    for (acno = 0, ac = &an->ac[acno];
         acno < WME_NUM_AC; acno++, ac++) {
        ac->sched = false;
        INIT_LIST_HEAD(&ac->tid_q);

        switch (acno) {
        case WME_AC_BE:
            ac->qnum = ath_tx_get_qnum(sc,
                    ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
            break;
        case WME_AC_BK:
            ac->qnum = ath_tx_get_qnum(sc,
                    ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
            break;
        case WME_AC_VI:
            ac->qnum = ath_tx_get_qnum(sc,
                    ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
            break;
        case WME_AC_VO:
            ac->qnum = ath_tx_get_qnum(sc,
                    ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
            break;
        }
    }
}
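
/*
 * Per-station teardown: unschedule and drain every TID belonging to
 * this node on all active hardware queues.
 */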
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
    int i;
    struct ath_atx_ac *ac, *ac_tmp;
    struct ath_atx_tid *tid, *tid_tmp;
    struct ath_txq *txq;

    for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
        if (ATH_TXQ_SETUP(sc, i)) {
            txq = &sc->tx.txq[i];

            spin_lock_bh(&txq->axq_lock);

            list_for_each_entry_safe(ac,
                    ac_tmp, &txq->axq_acq, list) {
                tid = list_first_entry(&ac->tid_q,
                        struct ath_atx_tid, list);
                if (tid && tid->an != an)
                    continue;
                list_del(&ac->list);
                ac->sched = false;

                list_for_each_entry_safe(tid,
                        tid_tmp, &ac->tid_q, list) {
                    list_del(&tid->list);
                    tid->sched = false;
                    ath_tid_drain(sc, txq, tid);
                    tid->state &= ~AGGR_ADDBA_COMPLETE;
                    tid->state &= ~AGGR_CLEANUP;
                }
            }

            spin_unlock_bh(&txq->axq_lock);
        }
    }
}