/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) ((_usec) >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) ((((_usec) * 5) - 4) / 18)

#define OFDM_SIFS_TIME          16

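/*
 * Worked example of the symbol-time macros above (illustrative only, not
 * used by the driver): a frame spanning 10 OFDM symbols takes
 * SYMBOL_TIME(10) = 10 << 2 = 40 us with the full 800 ns guard interval,
 * and SYMBOL_TIME_HALFGI(10) = (10 * 18 + 4) / 5 = 36 us with the short
 * guard interval; the "+ 4" rounds up whenever 3.6 * ns is fractional.
 */
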
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },	/*  0: BPSK */
	{    52,  108 },	/*  1: QPSK 1/2 */
	{    78,  162 },	/*  2: QPSK 3/4 */
	{   104,  216 },	/*  3: 16-QAM 1/2 */
	{   156,  324 },	/*  4: 16-QAM 3/4 */
	{   208,  432 },	/*  5: 64-QAM 2/3 */
	{   234,  486 },	/*  6: 64-QAM 3/4 */
	{   260,  540 },	/*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

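/*
 * Worked example (illustrative only): HT MCS 12 indexes the table as
 * bits_per_symbol[12 % 8][width], i.e. the 16-QAM 3/4 row, and carries
 * HT_RC_2_STREAMS(12) = (((12 & 0x78) >> 3) + 1) = 2 spatial streams.
 * At 40 MHz (width = 1) that gives 324 * 2 = 648 data bits per symbol.
 */
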
static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

72static int ath_max_4ms_framelen[4][32] = {
73 [MCS_HT20] = {
74 3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
75 6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
76 9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
77 12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
78 },
79 [MCS_HT20_SGI] = {
80 3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
81 7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
82 10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
83 14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
545750d3
FF
84 },
85 [MCS_HT40] = {
0e668cde
FF
86 6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
87 13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
88 20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
89 26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
545750d3
FF
90 },
91 [MCS_HT40_SGI] = {
0e668cde
FF
92 7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
93 14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
94 22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
95 29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
545750d3
FF
96 }
97};
98
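/*
 * Sanity check on the table above (illustrative only): each entry is
 * roughly the PHY rate times 4 ms. MCS 7 at HT20 with full GI runs at
 * 65 Mbit/s, and 65e6 * 0.004 / 8 = 32500 bytes, close to the 32172-byte
 * entry once PHY overhead is subtracted. Faster combinations saturate at
 * 65532 because the aggregate length must stay below 64 KB.
 */
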
/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	spin_lock_bh(&txq->axq_lock);
	tid->paused++;
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	BUG_ON(tid->paused <= 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0)
		goto unlock;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	BUG_ON(tid->paused <= 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0) {
		spin_unlock_bh(&txq->axq_lock);
		return;
	}

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		BUG_ON(bf_isretried(bf));
		list_move_tail(&bf->list, &bf_head);
		ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	tid->tx_buf[cindex] = NULL;

	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	BUG_ON(tid->tx_buf[cindex] != NULL);
	tid->tx_buf[cindex] = bf;

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

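/*
 * Illustrative example of the window arithmetic above (values invented):
 * with tid->seq_start = 100 and tid->baw_head = 5, a subframe with
 * bf_seqno = 103 gives index = 3 and occupies slot
 * cindex = (5 + 3) & (ATH_TID_MAX_BUFS - 1) = 8. The masking assumes
 * ATH_TID_MAX_BUFS is a power of two, which is what makes "&" act as a
 * cheap modulo.
 */
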
/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;
	tbf->bf_dmacontext = bf->bf_dmacontext;

	return tbf;
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	rcu_read_lock();

	/* XXX: use ieee80211_find_sta! */
	sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
	if (!sta) {
		rcu_read_unlock();
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	if (tid->state & AGGR_CLEANUP) {
		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;

			/* send buffered frames as singles */
			ath_tx_flush_tid(sc, tid);
		}
		rcu_read_unlock();
		return;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

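/*
 * Illustrative example (rates assumed, not taken from hardware): if every
 * populated rate in the series is MCS 7 at HT20 with full GI,
 * max_4ms_framelen resolves to 32172 from ath_max_4ms_framelen[]. Without
 * Bluetooth pressure the limit is min(32172, ATH_AMPDU_LIMIT_MAX); with
 * SC_OP_BT_PRIORITY_DETECTED set it shrinks to 3/8 of that,
 * 32172 * 3 / 8 = 12064 bytes, leaving airtime for the BT link.
 */
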
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

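/*
 * Worked example for the density conversion above (illustrative;
 * ATH_AGGR_DELIM_SZ is assumed to be the 4-byte MPDU delimiter size):
 * an mpdudensity of 8 us at full GI gives
 * nsymbols = NUM_SYMBOLS_PER_USEC(8) = 2. At MCS 15 (two streams,
 * 64-QAM 5/6) on a 40 MHz channel, nsymbits = 540 * 2 = 1080, so
 * minlen = 2 * 1080 / 8 = 270 bytes. A 100-byte subframe would then need
 * mindelim = (270 - 100) / 4 = 42 delimiters, overriding the
 * length-based default.
 */
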
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		ath_tx_addto_baw(sc, tid, bf);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}

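/*
 * Illustrative accounting for ath_tx_form_aggr() above (assuming the
 * 4-byte ATH_AGGR_DELIM_SZ as before): a 1502-byte subframe contributes
 * al_delta = 4 + 1502 = 1506 to the aggregate length, and the following
 * subframe is preceded by bpad = PADBYTES(1506) + (ndelim << 2) bytes,
 * where PADBYTES(1506) = (4 - (1506 % 4)) % 4 = 2 rounds the previous
 * subframe up to a 4-byte boundary.
 */
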
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		       u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);
	txtid->state |= AGGR_ADDBA_PROGRESS;
	ath_tx_pause_tid(sc, txtid);
	*ssn = txtid->seq_start;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
	struct ath_tx_status ts;
	struct ath_buf *bf;
	struct list_head bf_head;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	ath_tx_pause_tid(sc, txtid);

	/* drop all software retried frames and mark this TID */
	spin_lock_bh(&txq->axq_lock);
	while (!list_empty(&txtid->buf_q)) {
		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
		if (!bf_isretried(bf)) {
			/*
			 * NB: this relies on the assumption that a
			 * software-retried frame will always stay at
			 * the head of the software queue.
			 */
			break;
		}
		list_move_tail(&bf->list, &bf_head);
		ath_tx_update_baw(sc, txtid, bf->bf_seqno);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}
	spin_unlock_bh(&txq->axq_lock);

	if (txtid->baw_head != txtid->baw_tail) {
		txtid->state |= AGGR_CLEANUP;
	} else {
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
		ath_tx_flush_tid(sc, txtid);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *txtid;

	if (!(sc->sc_flags & SC_OP_TXAGGR))
		return false;

	txtid = ATH_AN_2_TID(an, tidno);

	if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
		return true;
	return false;
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}

int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
{
	int qnum;

	switch (qtype) {
	case ATH9K_TX_QUEUE_DATA:
		if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
			ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
				  "HAL AC %u out of range, max %zu!\n",
				  haltype, ARRAY_SIZE(sc->tx.hwq_map));
			return -1;
		}
		qnum = sc->tx.hwq_map[haltype];
		break;
	case ATH9K_TX_QUEUE_BEACON:
		qnum = sc->beacon.beaconq;
		break;
	case ATH9K_TX_QUEUE_CAB:
		qnum = sc->beacon.cabq->axq_qnum;
		break;
	default:
		qnum = -1;
	}
	return qnum;
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

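/*
 * Illustrative example (units follow sc->beacon_interval): with
 * cabqReadytime clamped to 10 (percent) and a beacon interval of 100,
 * the CAB queue ready time becomes (100 * 10) / 100 = 10, i.e. one tenth
 * of the beacon interval to drain buffered multicast frames.
 */
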
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_tx_aborted = true;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend) {
		int r;

		ath_print(common, ATH_DBG_FATAL,
			  "Failed to stop TX DMA. Resetting hardware!\n");

		spin_lock_bh(&sc->sc_resetlock);
		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
		if (r)
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to reset hardware; reset status %d\n",
				  r);
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	if (list_empty(&txq->axq_acq))
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		break;
	} while (!list_empty(&ac->tid_q));

	if (!list_empty(&ac->tid_q)) {
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "HAL AC %u out of range, max %zu!\n",
			  haltype, ARRAY_SIZE(sc->tx.hwq_map));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->tx.hwq_map[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_print(common, ATH_DBG_QUEUE,
		  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_print(common, ATH_DBG_XMIT,
				  "Initializing tx fifo %d which "
				  "is non-empty\n",
				  txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_print(common, ATH_DBG_XMIT,
			  "TXDP[%u] = %llx (%p)\n",
			  txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_print(common, ATH_DBG_XMIT,
				  "TXDP[%u] = %llx (%p)\n",
				  txq->axq_qnum, ito64(bf->bf_daddr),
				  bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_print(common, ATH_DBG_XMIT,
				  "link[%u] (%p)=%llx (%p)\n",
				  txq->axq_qnum, txq->axq_link,
				  ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
}

static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct list_head *bf_head,
			      struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued);

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_move_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
}

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);

	bf->bf_lastbf = bf;
	bf->bf_nframes = 1;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static int get_hw_crypto_keytype(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.hw_key) {
		if (tx_info->control.hw_key->alg == ALG_WEP)
			return ATH9K_KEY_TYPE_WEP;
		else if (tx_info->control.hw_key->alg == ALG_TKIP)
			return ATH9K_KEY_TYPE_TKIP;
		else if (tx_info->control.hw_key->alg == ALG_CCMP)
			return ATH9K_KEY_TYPE_AES;
	}

	return ATH9K_KEY_TYPE_CLEAR;
}

static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/*
	 * For HT capable stations, we save tidno for later use.
	 * We also override seqno set by upper layer with the one
	 * in tx aggregation state.
	 */
	tid = ATH_AN_2_TID(an, bf->bf_tidno);
	hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
	bf->bf_seqno = tid->seq_next;
	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}

static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;

	if (use_ldpc)
		flags |= ATH9K_TXDESC_LDPC;

	return flags;
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4 us vs. 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams, pktlen;

	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

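/*
 * Worked example (illustrative only): a 1500-byte MPDU at MCS 7
 * (rix = 7, one stream) on a 20 MHz channel with full GI gives
 * nbits = 1500 * 8 + 22 = 12022 and nsymbits = 260, so
 * nsymbols = ceil(12022 / 260) = 47. SYMBOL_TIME(47) = 188 us, plus
 * 8 + 8 + 4 + 8 + 4 + 4 = 36 us of training/signal fields, yields a
 * PktDuration of 224 us.
 */
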
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_11n_rate_series series[4];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i, flags = 0;
	u8 rix = 0, ctsrate = 0;
	bool is_pspoll;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;
	is_pspoll = ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	ctsrate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		ctsrate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		series[i].Tries = rates[i].count;
		series[i].ChSel = common->tx_chainmask;

		if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
		    (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			series[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			series[i].Rate = rix | 0x80;
			series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
				 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				series[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		series[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				series[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
		flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (flags & ATH9K_TXDESC_RTSENA)
		flags &= ~ATH9K_TXDESC_CTSENA;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
				     !is_pspoll, ctsrate,
				     0, series, 4, flags);

	if (sc->config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
}

static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
			       struct sk_buff *skb,
			       struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int hdrlen;
	__le16 fc;
	int padpos, padsize;
	bool use_ldpc = false;

	tx_info->pad[0] = 0;
	switch (txctl->frame_type) {
	case ATH9K_IFT_NOT_INTERNAL:
		break;
	case ATH9K_IFT_PAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
		/* fall through */
	case ATH9K_IFT_UNPAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
		break;
	}
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;

	ATH_TXBUF_RESET(bf);

	bf->aphy = aphy;
	bf->bf_frmlen = skb->len + FCS_LEN;
	/* Remove the padding size from bf_frmlen, if any */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize)
		bf->bf_frmlen -= padsize;

	if (conf_is_ht(&hw->conf)) {
		bf->bf_state.bf_type |= BUF_HT;
		if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
			use_ldpc = true;
	}

	bf->bf_flags = setup_tx_flags(skb, use_ldpc);

	bf->bf_keytype = get_hw_crypto_keytype(skb);
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
		bf->bf_frmlen += tx_info->control.hw_key->icv_len;
		bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
	} else {
		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
	}

	if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
	    (sc->sc_flags & SC_OP_TXAGGR))
		assign_aggr_tid_seqno(skb, bf);

	bf->bf_mpdu = skb;

	bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
					   skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
		bf->bf_mpdu = NULL;
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "dma_mapping_error() on TX\n");
		return -ENOMEM;
	}

	bf->bf_buf_addr = bf->bf_dmacontext;

	/* tag if this is a nullfunc frame to enable PS when AP acks it */
	if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
		bf->bf_isnullfunc = true;
		sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
	} else
		bf->bf_isnullfunc = false;

	bf->bf_tx_aborted = false;

	return 0;
}

1710/* FIXME: tx power */
1711static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
528f0c6b
S
1712 struct ath_tx_control *txctl)
1713{
a22be22a 1714 struct sk_buff *skb = bf->bf_mpdu;
528f0c6b 1715 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
c37452b0 1716 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
528f0c6b
S
1717 struct ath_node *an = NULL;
1718 struct list_head bf_head;
1719 struct ath_desc *ds;
1720 struct ath_atx_tid *tid;
cbe61d8a 1721 struct ath_hw *ah = sc->sc_ah;
528f0c6b 1722 int frm_type;
c37452b0 1723 __le16 fc;
528f0c6b 1724
528f0c6b 1725 frm_type = get_hw_packet_type(skb);
c37452b0 1726 fc = hdr->frame_control;
528f0c6b
S
1727
1728 INIT_LIST_HEAD(&bf_head);
1729 list_add_tail(&bf->list, &bf_head);
f078f209 1730
f078f209 1731 ds = bf->bf_desc;
87d5efbb 1732 ath9k_hw_set_desc_link(ah, ds, 0);
f078f209 1733
528f0c6b
S
1734 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
1735 bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
1736
1737 ath9k_hw_filltxdesc(ah, ds,
8f93b8b3
S
1738 skb->len, /* segment length */
1739 true, /* first segment */
1740 true, /* last segment */
3f3a1c80 1741 ds, /* first descriptor */
cc610ac0
VT
1742 bf->bf_buf_addr,
1743 txctl->txq->axq_qnum);
f078f209 1744
528f0c6b 1745 spin_lock_bh(&txctl->txq->axq_lock);
f078f209 1746
f1617967
JL
1747 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
1748 tx_info->control.sta) {
1749 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1750 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1751
c37452b0
S
1752 if (!ieee80211_is_data_qos(fc)) {
1753 ath_tx_send_normal(sc, txctl->txq, &bf_head);
1754 goto tx_done;
1755 }
1756
4fdec031 1757 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
f078f209
LR
1758 /*
1759 * Try aggregation if it's a unicast data frame
1760 * and the destination is HT capable.
1761 */
528f0c6b 1762 ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
f078f209
LR
1763 } else {
1764 /*
528f0c6b
S
1765 * Send this frame as a normal MPDU when the ADDBA
1766 * exchange is neither complete nor pending.
f078f209 1767 */
c37452b0
S
1768 ath_tx_send_ht_normal(sc, txctl->txq,
1769 tid, &bf_head);
f078f209
LR
1770 }
1771 } else {
c37452b0 1772 ath_tx_send_normal(sc, txctl->txq, &bf_head);
f078f209 1773 }
528f0c6b 1774
c37452b0 1775tx_done:
528f0c6b 1776 spin_unlock_bh(&txctl->txq->axq_lock);
f078f209
LR
1777}
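/*
 * Editor's note - the dispatch above, summarized (illustrative, not
 * part of the original source):
 *
 *	HT + TXAGGR + sta, QoS data, AMPDU flag -> ath_tx_send_ampdu()
 *	HT + TXAGGR + sta, QoS data, no AMPDU   -> ath_tx_send_ht_normal()
 *	HT + TXAGGR + sta, non-QoS data         -> ath_tx_send_normal()
 *	everything else                         -> ath_tx_send_normal()
 */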
1778
f8316df1 1779/* Upon failure caller should free skb */
c52f33d0 1780int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
528f0c6b 1781 struct ath_tx_control *txctl)
f078f209 1782{
c52f33d0
JM
1783 struct ath_wiphy *aphy = hw->priv;
1784 struct ath_softc *sc = aphy->sc;
c46917bb 1785 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
84642d6b 1786 struct ath_txq *txq = txctl->txq;
528f0c6b 1787 struct ath_buf *bf;
f8316df1 1788 int r;
f078f209 1789
528f0c6b
S
1790 bf = ath_tx_get_buffer(sc);
1791 if (!bf) {
c46917bb 1792 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
528f0c6b
S
1793 return -1;
1794 }
1795
84642d6b
FF
1796 bf->txq = txctl->txq;
1797 spin_lock_bh(&bf->txq->axq_lock);
1798 if (++bf->txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
1799 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1800 txq->stopped = 1;
1801 }
1802 spin_unlock_bh(&bf->txq->axq_lock);
1803
c52f33d0 1804 r = ath_tx_setup_buffer(hw, bf, skb, txctl);
f8316df1 1805 if (unlikely(r)) {
c46917bb 1806 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
c112d0c5
LR
1807
1808 /* upon ath_tx_processq() this TX queue will be resumed; we
1809 * guarantee this will happen by knowing beforehand that
1810 * we will at least have to run TX completion on one buffer
1811 * on the queue */
1812 spin_lock_bh(&txq->axq_lock);
84642d6b 1813 if (!txq->stopped && txq->axq_depth > 1) {
f52de03b 1814 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
c112d0c5
LR
1815 txq->stopped = 1;
1816 }
1817 spin_unlock_bh(&txq->axq_lock);
1818
0a8cea84 1819 ath_tx_return_buffer(sc, bf);
c112d0c5 1820
f8316df1
LR
1821 return r;
1822 }
1823
8f93b8b3 1824 ath_tx_start_dma(sc, bf, txctl);
f078f209 1825
528f0c6b 1826 return 0;
f078f209
LR
1827}
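/*
 * Editor's sketch of the caller contract (illustrative, not from the
 * original source; 'hw', 'skb' and 'txq' are assumed context): the
 * skb is consumed only on success, so a caller typically does
 *
 *	struct ath_tx_control txctl;
 *
 *	memset(&txctl, 0, sizeof(txctl));
 *	txctl.txq = txq;
 *	if (ath_tx_start(hw, skb, &txctl) != 0)
 *		dev_kfree_skb_any(skb);
 *
 * which is the pattern ath_tx_cabq() below follows.
 */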
1828
c52f33d0 1829void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
f078f209 1830{
c52f33d0
JM
1831 struct ath_wiphy *aphy = hw->priv;
1832 struct ath_softc *sc = aphy->sc;
c46917bb 1833 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
4d91f9f3
BP
1834 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1835 int padpos, padsize;
e8324357
S
1836 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1837 struct ath_tx_control txctl;
f078f209 1838
e8324357 1839 memset(&txctl, 0, sizeof(struct ath_tx_control));
f078f209
LR
1840
1841 /*
e8324357
S
1842 * As a temporary workaround, assign seq# here; this will likely need
1843 * to be cleaned up to work better with Beacon transmission and virtual
1844 * BSSes.
f078f209 1845 */
e8324357 1846 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
e8324357
S
1847 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1848 sc->tx.seq_no += 0x10;
1849 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1850 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
f078f209 1851 }
f078f209 1852
e8324357 1853 /* Add the padding after the header if this is not already done */
4d91f9f3
BP
1854 padpos = ath9k_cmn_padpos(hdr->frame_control);
1855 padsize = padpos & 3;
1856 if (padsize && skb->len > padpos) {
e8324357 1857 if (skb_headroom(skb) < padsize) {
c46917bb
LR
1858 ath_print(common, ATH_DBG_XMIT,
1859 "TX CABQ padding failed\n");
e8324357
S
1860 dev_kfree_skb_any(skb);
1861 return;
1862 }
1863 skb_push(skb, padsize);
4d91f9f3 1864 memmove(skb->data, skb->data + padsize, padpos);
f078f209 1865 }
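/*
 * Editor's note - illustrative layout of the skb_push()/memmove()
 * pair above, not from the original source. With padpos = 24 and
 * padsize = 2 (hypothetical values):
 *
 *	before: |hdr(24)|payload...|
 *	push:   |??(2)|hdr(24)|payload...|
 *	move:   |hdr(24)|??(2)|payload...|
 *
 * i.e. the header is copied down to the new head and the two pad
 * bytes end up between header and payload, 4-byte-aligning the body.
 */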
f078f209 1866
e8324357 1867 txctl.txq = sc->beacon.cabq;
f078f209 1868
c46917bb
LR
1869 ath_print(common, ATH_DBG_XMIT,
1870 "transmitting CABQ packet, skb: %p\n", skb);
f078f209 1871
c52f33d0 1872 if (ath_tx_start(hw, skb, &txctl) != 0) {
c46917bb 1873 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
e8324357 1874 goto exit;
f078f209 1875 }
f078f209 1876
e8324357
S
1877 return;
1878exit:
1879 dev_kfree_skb_any(skb);
f078f209
LR
1880}
1881
e8324357
S
1882/*****************/
1883/* TX Completion */
1884/*****************/
528f0c6b 1885
e8324357 1886static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
827e69bf 1887 struct ath_wiphy *aphy, int tx_flags)
528f0c6b 1888{
e8324357
S
1889 struct ieee80211_hw *hw = sc->hw;
1890 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
c46917bb 1891 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
4d91f9f3
BP
1892 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1893 int padpos, padsize;
528f0c6b 1894
c46917bb 1895 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
528f0c6b 1896
827e69bf
FF
1897 if (aphy)
1898 hw = aphy->hw;
528f0c6b 1899
6b2c4032 1900 if (tx_flags & ATH_TX_BAR)
e8324357 1901 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
e8324357 1902
6b2c4032 1903 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
e8324357
S
1904 /* Frame was ACKed */
1905 tx_info->flags |= IEEE80211_TX_STAT_ACK;
528f0c6b
S
1906 }
1907
4d91f9f3
BP
1908 padpos = ath9k_cmn_padpos(hdr->frame_control);
1909 padsize = padpos & 3;
1910 if (padsize && skb->len > padpos + padsize) {
e8324357
S
1911 /*
1912 * Remove MAC header padding before giving the frame back to
1913 * mac80211.
1914 */
4d91f9f3 1915 memmove(skb->data + padsize, skb->data, padpos);
e8324357
S
1916 skb_pull(skb, padsize);
1917 }
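/*
 * Editor's note (illustrative, not from the original source): this is
 * the inverse of the alignment padding added on the transmit path.
 * With padpos = 24 and padsize = 2 (hypothetical values):
 *
 *	before: |hdr(24)|pad(2)|payload...|
 *	move:   |xx(2)|hdr(24)|payload...|   (header shifted up)
 *	pull:   |hdr(24)|payload...|
 *
 * so mac80211 gets the frame back without the driver-internal pad.
 */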
528f0c6b 1918
1b04b930
S
1919 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1920 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
c46917bb
LR
1921 ath_print(common, ATH_DBG_PS,
1922 "Going back to sleep after having "
f643e51d 1923 "received TX status (0x%lx)\n",
1b04b930
S
1924 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1925 PS_WAIT_FOR_CAB |
1926 PS_WAIT_FOR_PSPOLL_DATA |
1927 PS_WAIT_FOR_TX_ACK));
9a23f9ca
JM
1928 }
1929
827e69bf 1930 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
f0ed85c6 1931 ath9k_tx_status(hw, skb);
827e69bf
FF
1932 else
1933 ieee80211_tx_status(hw, skb);
e8324357 1934}
f078f209 1935
e8324357 1936static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
db1a052b
FF
1937 struct ath_txq *txq, struct list_head *bf_q,
1938 struct ath_tx_status *ts, int txok, int sendbar)
f078f209 1939{
e8324357 1940 struct sk_buff *skb = bf->bf_mpdu;
e8324357 1941 unsigned long flags;
6b2c4032 1942 int tx_flags = 0;
f078f209 1943
e8324357 1944 if (sendbar)
6b2c4032 1945 tx_flags = ATH_TX_BAR;
f078f209 1946
e8324357 1947 if (!txok) {
6b2c4032 1948 tx_flags |= ATH_TX_ERROR;
f078f209 1949
e8324357 1950 if (bf_isxretried(bf))
6b2c4032 1951 tx_flags |= ATH_TX_XRETRY;
f078f209
LR
1952 }
1953
84642d6b
FF
1954 if (bf->txq) {
1955 spin_lock_bh(&bf->txq->axq_lock);
1956 bf->txq->pending_frames--;
1957 spin_unlock_bh(&bf->txq->axq_lock);
1958 bf->txq = NULL;
1959 }
1960
e8324357 1961 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
827e69bf 1962 ath_tx_complete(sc, skb, bf->aphy, tx_flags);
db1a052b 1963 ath_debug_stat_tx(sc, txq, bf, ts);
e8324357
S
1964
1965 /*
1966 * Return the ath_buf list of this MPDU to the free queue.
1967 */
1968 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1969 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1970 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
f078f209
LR
1971}
1972
e8324357 1973static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
db1a052b 1974 struct ath_tx_status *ts, int txok)
f078f209 1975{
e8324357
S
1976 u16 seq_st = 0;
1977 u32 ba[WME_BA_BMP_SIZE >> 5];
1978 int ba_index;
1979 int nbad = 0;
1980 int isaggr = 0;
f078f209 1981
7c9fd60f 1982 if (bf->bf_lastbf->bf_tx_aborted)
e8324357 1983 return 0;
f078f209 1984
e8324357
S
1985 isaggr = bf_isaggr(bf);
1986 if (isaggr) {
db1a052b
FF
1987 seq_st = ts->ts_seqnum;
1988 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
e8324357 1989 }
f078f209 1990
e8324357
S
1991 while (bf) {
1992 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
1993 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
1994 nbad++;
1995
1996 bf = bf->bf_next;
1997 }
f078f209 1998
e8324357
S
1999 return nbad;
2000}
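/*
 * Editor's sketch of the block-ack accounting above (illustrative,
 * not from the original source). With seq_st = 100 and an aggregate
 * of subframes 100..103, the check for subframe 102 is conceptually:
 *
 *	ba_index = ATH_BA_INDEX(100, 102);	// -> 2
 *	acked = ATH_BA_ISSET(ba, ba_index);	// bit 2 of ba[]
 *
 * Each subframe whose bit is clear in the BA bitmap was not
 * acknowledged by the receiver and is counted in nbad.
 */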
f078f209 2001
db1a052b 2002static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
8a92e2ee 2003 int nbad, int txok, bool update_rc)
f078f209 2004{
a22be22a 2005 struct sk_buff *skb = bf->bf_mpdu;
254ad0ff 2006 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
e8324357 2007 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
827e69bf 2008 struct ieee80211_hw *hw = bf->aphy->hw;
8a92e2ee 2009 u8 i, tx_rateindex;
f078f209 2010
95e4acb7 2011 if (txok)
db1a052b 2012 tx_info->status.ack_signal = ts->ts_rssi;
95e4acb7 2013
db1a052b 2014 tx_rateindex = ts->ts_rateindex;
8a92e2ee
VT
2015 WARN_ON(tx_rateindex >= hw->max_rates);
2016
db1a052b 2017 if (ts->ts_status & ATH9K_TXERR_FILT)
e8324357 2018 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
d969847c
FF
2019 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
2020 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
f078f209 2021
db1a052b 2022 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
8a92e2ee 2023 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
254ad0ff 2024 if (ieee80211_is_data(hdr->frame_control)) {
db1a052b 2025 if (ts->ts_flags &
827e69bf
FF
2026 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
2027 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
db1a052b
FF
2028 if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
2029 (ts->ts_status & ATH9K_TXERR_FIFO))
827e69bf
FF
2030 tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
2031 tx_info->status.ampdu_len = bf->bf_nframes;
2032 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
e8324357 2033 }
f078f209 2034 }
8a92e2ee 2035
545750d3 2036 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
8a92e2ee 2037 tx_info->status.rates[i].count = 0;
545750d3
FF
2038 tx_info->status.rates[i].idx = -1;
2039 }
8a92e2ee
VT
2040
2041 tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1;
f078f209
LR
2042}
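/*
 * Editor's note (an assumption about the mac80211 reporting contract,
 * not from the original source): status.rates[] must reflect what the
 * hardware actually tried. Entries past ts_rateindex are invalidated
 * with idx = -1 / count = 0, and the final attempted rate gets
 * count = bf_retries + 1, the total number of transmission attempts
 * at that rate, so rate control sees an accurate retry chain.
 */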
2043
059d806c
S
2044static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
2045{
2046 int qnum;
2047
2048 spin_lock_bh(&txq->axq_lock);
84642d6b 2049 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
059d806c
S
2050 qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
2051 if (qnum != -1) {
f52de03b 2052 ath_mac80211_start_queue(sc, qnum);
059d806c
S
2053 txq->stopped = 0;
2054 }
2055 }
2056 spin_unlock_bh(&txq->axq_lock);
2057}
2058
e8324357 2059static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
f078f209 2060{
cbe61d8a 2061 struct ath_hw *ah = sc->sc_ah;
c46917bb 2062 struct ath_common *common = ath9k_hw_common(ah);
e8324357 2063 struct ath_buf *bf, *lastbf, *bf_held = NULL;
f078f209 2064 struct list_head bf_head;
e8324357 2065 struct ath_desc *ds;
29bffa96 2066 struct ath_tx_status ts;
0934af23 2067 int txok;
e8324357 2068 int status;
f078f209 2069
c46917bb
LR
2070 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2071 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2072 txq->axq_link);
f078f209 2073
f078f209
LR
2074 for (;;) {
2075 spin_lock_bh(&txq->axq_lock);
f078f209
LR
2076 if (list_empty(&txq->axq_q)) {
2077 txq->axq_link = NULL;
f078f209
LR
2078 spin_unlock_bh(&txq->axq_lock);
2079 break;
2080 }
f078f209
LR
2081 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2082
e8324357
S
2083 /*
2084 * There is a race condition in which a BH gets scheduled
2085 * after sw writes TxE and before hw re-loads the last
2086 * descriptor to fetch the newly chained one.
2087 * Software must keep the last DONE descriptor as a
2088 * holding descriptor - software does so by marking
2089 * it with the STALE flag.
2090 */
2091 bf_held = NULL;
a119cc49 2092 if (bf->bf_stale) {
e8324357
S
2093 bf_held = bf;
2094 if (list_is_last(&bf_held->list, &txq->axq_q)) {
6ef9b13d 2095 spin_unlock_bh(&txq->axq_lock);
e8324357
S
2096 break;
2097 } else {
2098 bf = list_entry(bf_held->list.next,
6ef9b13d 2099 struct ath_buf, list);
e8324357 2100 }
f078f209
LR
2101 }
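/*
 * Editor's timeline sketch of the race handled above (illustrative,
 * not from the original source):
 *
 *	1. hw completes descriptor D(n) and raises the TX interrupt
 *	2. sw chains D(n+1) behind D(n) and writes TxE
 *	3. this handler runs before hw re-reads D(n)->link
 *
 * Recycling D(n) at step 3 could leave hw following a dangling link,
 * so the completed D(n) is kept as the stale "holding" descriptor
 * until its successor completes.
 */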
2102
2103 lastbf = bf->bf_lastbf;
e8324357 2104 ds = lastbf->bf_desc;
f078f209 2105
29bffa96
FF
2106 memset(&ts, 0, sizeof(ts));
2107 status = ath9k_hw_txprocdesc(ah, ds, &ts);
e8324357 2108 if (status == -EINPROGRESS) {
f078f209 2109 spin_unlock_bh(&txq->axq_lock);
e8324357 2110 break;
f078f209 2111 }
f078f209 2112
e7824a50
LR
2113 /*
2114 * We now know the nullfunc frame has been ACKed so we
2115 * can disable RX.
2116 */
2117 if (bf->bf_isnullfunc &&
29bffa96 2118 (ts.ts_status & ATH9K_TX_ACKED)) {
3f7c5c10
SB
2119 if ((sc->ps_flags & PS_ENABLED))
2120 ath9k_enable_ps(sc);
2121 else
1b04b930 2122 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
e7824a50
LR
2123 }
2124
e8324357
S
2125 /*
2126 * Remove the ath_bufs of this transmit unit from the txq,
2127 * but leave the last descriptor behind as the holding
2128 * descriptor for the hw.
2129 */
a119cc49 2130 lastbf->bf_stale = true;
e8324357 2131 INIT_LIST_HEAD(&bf_head);
e8324357
S
2132 if (!list_is_singular(&lastbf->list))
2133 list_cut_position(&bf_head,
2134 &txq->axq_q, lastbf->list.prev);
f078f209 2135
e8324357 2136 txq->axq_depth--;
29bffa96 2137 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
164ace38 2138 txq->axq_tx_inprogress = false;
0a8cea84
FF
2139 if (bf_held)
2140 list_del(&bf_held->list);
e8324357 2141 spin_unlock_bh(&txq->axq_lock);
f078f209 2142
0a8cea84
FF
2143 if (bf_held)
2144 ath_tx_return_buffer(sc, bf_held);
f078f209 2145
e8324357
S
2146 if (!bf_isampdu(bf)) {
2147 /*
2148 * This frame was sent out as a single MPDU, so the
2149 * hardware retry status applies to it directly.
2150 */
29bffa96
FF
2151 bf->bf_retries = ts.ts_longretry;
2152 if (ts.ts_status & ATH9K_TXERR_XRETRY)
e8324357 2153 bf->bf_state.bf_type |= BUF_XRETRY;
29bffa96 2154 ath_tx_rc_status(bf, &ts, 0, txok, true);
e8324357 2155 }
f078f209 2156
e8324357 2157 if (bf_isampdu(bf))
29bffa96 2158 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
e8324357 2159 else
29bffa96 2160 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
8469cdef 2161
059d806c 2162 ath_wake_mac80211_queue(sc, txq);
8469cdef 2163
059d806c 2164 spin_lock_bh(&txq->axq_lock);
e8324357
S
2165 if (sc->sc_flags & SC_OP_TXAGGR)
2166 ath_txq_schedule(sc, txq);
2167 spin_unlock_bh(&txq->axq_lock);
8469cdef
S
2168 }
2169}
2170
305fe47f 2171static void ath_tx_complete_poll_work(struct work_struct *work)
164ace38
SB
2172{
2173 struct ath_softc *sc = container_of(work, struct ath_softc,
2174 tx_complete_work.work);
2175 struct ath_txq *txq;
2176 int i;
2177 bool needreset = false;
2178
2179 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2180 if (ATH_TXQ_SETUP(sc, i)) {
2181 txq = &sc->tx.txq[i];
2182 spin_lock_bh(&txq->axq_lock);
2183 if (txq->axq_depth) {
2184 if (txq->axq_tx_inprogress) {
2185 needreset = true;
2186 spin_unlock_bh(&txq->axq_lock);
2187 break;
2188 } else {
2189 txq->axq_tx_inprogress = true;
2190 }
2191 }
2192 spin_unlock_bh(&txq->axq_lock);
2193 }
2194
2195 if (needreset) {
c46917bb
LR
2196 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2197 "tx hung, resetting the chip\n");
332c5566 2198 ath9k_ps_wakeup(sc);
164ace38 2199 ath_reset(sc, false);
332c5566 2200 ath9k_ps_restore(sc);
164ace38
SB
2201 }
2202
42935eca 2203 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
164ace38
SB
2204 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2205}
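/*
 * Editor's note (illustrative, not from the original source): this is
 * a two-strike watchdog. The first poll that finds a queue non-empty
 * arms it by setting axq_tx_inprogress; a hang is declared only if
 * the next poll, ATH_TX_COMPLETE_POLL_INT ms later, finds the flag
 * still set with the queue still non-empty. ath_tx_processq() clears
 * the flag on every completion, disarming the watchdog.
 */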
2206
2207
f078f209 2208
e8324357 2209void ath_tx_tasklet(struct ath_softc *sc)
f078f209 2210{
e8324357
S
2211 int i;
2212 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
f078f209 2213
e8324357 2214 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
f078f209 2215
e8324357
S
2216 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2217 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2218 ath_tx_processq(sc, &sc->tx.txq[i]);
f078f209
LR
2219 }
2220}
2221
e5003249
VT
2222void ath_tx_edma_tasklet(struct ath_softc *sc)
2223{
2224 struct ath_tx_status txs;
2225 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2226 struct ath_hw *ah = sc->sc_ah;
2227 struct ath_txq *txq;
2228 struct ath_buf *bf, *lastbf;
2229 struct list_head bf_head;
2230 int status;
2231 int txok;
2232
2233 for (;;) {
2234 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2235 if (status == -EINPROGRESS)
2236 break;
2237 if (status == -EIO) {
2238 ath_print(common, ATH_DBG_XMIT,
2239 "Error processing tx status\n");
2240 break;
2241 }
2242
2243 /* Skip beacon completions */
2244 if (txs.qid == sc->beacon.beaconq)
2245 continue;
2246
2247 txq = &sc->tx.txq[txs.qid];
2248
2249 spin_lock_bh(&txq->axq_lock);
2250 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2251 spin_unlock_bh(&txq->axq_lock);
2252 return;
2253 }
2254
2255 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2256 struct ath_buf, list);
2257 lastbf = bf->bf_lastbf;
2258
2259 INIT_LIST_HEAD(&bf_head);
2260 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2261 &lastbf->list);
2262 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2263 txq->axq_depth--;
2264 txq->axq_tx_inprogress = false;
2265 spin_unlock_bh(&txq->axq_lock);
2266
2267 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2268
de0f648d
VT
2269 /*
2270 * Make sure the nullfunc frame is ACKed before configuring
2271 * the hw into PS mode.
2272 */
2273 if (bf->bf_isnullfunc && txok) {
2274 if ((sc->ps_flags & PS_ENABLED))
2275 ath9k_enable_ps(sc);
2276 else
2277 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
2278 }
2279
e5003249
VT
2280 if (!bf_isampdu(bf)) {
2281 bf->bf_retries = txs.ts_longretry;
2282 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2283 bf->bf_state.bf_type |= BUF_XRETRY;
2284 ath_tx_rc_status(bf, &txs, 0, txok, true);
2285 }
2286
2287 if (bf_isampdu(bf))
2288 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
2289 else
2290 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2291 &txs, txok, 0);
2292
7f9f3600
FF
2293 ath_wake_mac80211_queue(sc, txq);
2294
e5003249
VT
2295 spin_lock_bh(&txq->axq_lock);
2296 if (!list_empty(&txq->txq_fifo_pending)) {
2297 INIT_LIST_HEAD(&bf_head);
2298 bf = list_first_entry(&txq->txq_fifo_pending,
2299 struct ath_buf, list);
2300 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2301 &bf->bf_lastbf->list);
2302 ath_tx_txqaddbuf(sc, txq, &bf_head);
2303 } else if (sc->sc_flags & SC_OP_TXAGGR)
2304 ath_txq_schedule(sc, txq);
2305 spin_unlock_bh(&txq->axq_lock);
2306 }
2307}
2308
e8324357
S
2309/*****************/
2310/* Init, Cleanup */
2311/*****************/
f078f209 2312
5088c2f1
VT
2313static int ath_txstatus_setup(struct ath_softc *sc, int size)
2314{
2315 struct ath_descdma *dd = &sc->txsdma;
2316 u8 txs_len = sc->sc_ah->caps.txs_len;
2317
2318 dd->dd_desc_len = size * txs_len;
2319 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2320 &dd->dd_desc_paddr, GFP_KERNEL);
2321 if (!dd->dd_desc)
2322 return -ENOMEM;
2323
2324 return 0;
2325}
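/*
 * Editor's note - a worked sizing example with hypothetical numbers,
 * not from the original source: for a status ring of 64 entries and a
 * chip reporting txs_len = 36 bytes, the coherent buffer allocated
 * above is 64 * 36 = 2304 bytes, one fixed-size slot per TX status
 * descriptor that the EDMA hardware writes back.
 */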
2326
2327static int ath_tx_edma_init(struct ath_softc *sc)
2328{
2329 int err;
2330
2331 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2332 if (!err)
2333 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2334 sc->txsdma.dd_desc_paddr,
2335 ATH_TXSTATUS_RING_SIZE);
2336
2337 return err;
2338}
2339
2340static void ath_tx_edma_cleanup(struct ath_softc *sc)
2341{
2342 struct ath_descdma *dd = &sc->txsdma;
2343
2344 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2345 dd->dd_desc_paddr);
2346}
2347
e8324357 2348int ath_tx_init(struct ath_softc *sc, int nbufs)
f078f209 2349{
c46917bb 2350 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
e8324357 2351 int error = 0;
f078f209 2352
797fe5cb 2353 spin_lock_init(&sc->tx.txbuflock);
f078f209 2354
797fe5cb 2355 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
4adfcded 2356 "tx", nbufs, 1, 1);
797fe5cb 2357 if (error != 0) {
c46917bb
LR
2358 ath_print(common, ATH_DBG_FATAL,
2359 "Failed to allocate tx descriptors: %d\n", error);
797fe5cb
S
2360 goto err;
2361 }
f078f209 2362
797fe5cb 2363 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
5088c2f1 2364 "beacon", ATH_BCBUF, 1, 1);
797fe5cb 2365 if (error != 0) {
c46917bb
LR
2366 ath_print(common, ATH_DBG_FATAL,
2367 "Failed to allocate beacon descriptors: %d\n", error);
797fe5cb
S
2368 goto err;
2369 }
f078f209 2370
164ace38
SB
2371 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2372
5088c2f1
VT
2373 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2374 error = ath_tx_edma_init(sc);
2375 if (error)
2376 goto err;
2377 }
2378
797fe5cb 2379err:
e8324357
S
2380 if (error != 0)
2381 ath_tx_cleanup(sc);
f078f209 2382
e8324357 2383 return error;
f078f209
LR
2384}
2385
797fe5cb 2386void ath_tx_cleanup(struct ath_softc *sc)
e8324357
S
2387{
2388 if (sc->beacon.bdma.dd_desc_len != 0)
2389 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
2390
2391 if (sc->tx.txdma.dd_desc_len != 0)
2392 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
5088c2f1
VT
2393
2394 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2395 ath_tx_edma_cleanup(sc);
e8324357 2396}
f078f209
LR
2397
2398void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2399{
c5170163
S
2400 struct ath_atx_tid *tid;
2401 struct ath_atx_ac *ac;
2402 int tidno, acno;
f078f209 2403
8ee5afbc 2404 for (tidno = 0, tid = &an->tid[tidno];
c5170163
S
2405 tidno < WME_NUM_TID;
2406 tidno++, tid++) {
2407 tid->an = an;
2408 tid->tidno = tidno;
2409 tid->seq_start = tid->seq_next = 0;
2410 tid->baw_size = WME_MAX_BA;
2411 tid->baw_head = tid->baw_tail = 0;
2412 tid->sched = false;
e8324357 2413 tid->paused = false;
a37c2c79 2414 tid->state &= ~AGGR_CLEANUP;
c5170163 2415 INIT_LIST_HEAD(&tid->buf_q);
c5170163 2416 acno = TID_TO_WME_AC(tidno);
8ee5afbc 2417 tid->ac = &an->ac[acno];
a37c2c79
S
2418 tid->state &= ~AGGR_ADDBA_COMPLETE;
2419 tid->state &= ~AGGR_ADDBA_PROGRESS;
c5170163 2420 }
f078f209 2421
8ee5afbc 2422 for (acno = 0, ac = &an->ac[acno];
c5170163
S
2423 acno < WME_NUM_AC; acno++, ac++) {
2424 ac->sched = false;
2425 INIT_LIST_HEAD(&ac->tid_q);
2426
2427 switch (acno) {
2428 case WME_AC_BE:
2429 ac->qnum = ath_tx_get_qnum(sc,
2430 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
2431 break;
2432 case WME_AC_BK:
2433 ac->qnum = ath_tx_get_qnum(sc,
2434 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
2435 break;
2436 case WME_AC_VI:
2437 ac->qnum = ath_tx_get_qnum(sc,
2438 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
2439 break;
2440 case WME_AC_VO:
2441 ac->qnum = ath_tx_get_qnum(sc,
2442 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
2443 break;
f078f209
LR
2444 }
2445 }
2446}
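/*
 * Editor's note (illustrative, standard 802.11e UP-to-AC mapping
 * assumed, not from the original source): TID_TO_WME_AC() folds the
 * eight TIDs onto the four access categories roughly as
 *
 *	TID 0,3 -> WME_AC_BE,  TID 1,2 -> WME_AC_BK,
 *	TID 4,5 -> WME_AC_VI,  TID 6,7 -> WME_AC_VO
 *
 * so each tid->ac pointer above lands on one of the four per-node
 * ath_atx_ac structures initialized in the second loop.
 */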
2447
b5aa9bf9 2448void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
f078f209
LR
2449{
2450 int i;
2451 struct ath_atx_ac *ac, *ac_tmp;
2452 struct ath_atx_tid *tid, *tid_tmp;
2453 struct ath_txq *txq;
e8324357 2454
f078f209
LR
2455 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2456 if (ATH_TXQ_SETUP(sc, i)) {
b77f483f 2457 txq = &sc->tx.txq[i];
f078f209 2458
a9f042cb 2459 spin_lock_bh(&txq->axq_lock);
f078f209
LR
2460
2461 list_for_each_entry_safe(ac,
2462 ac_tmp, &txq->axq_acq, list) {
2463 tid = list_first_entry(&ac->tid_q,
2464 struct ath_atx_tid, list);
2465 if (tid && tid->an != an)
2466 continue;
2467 list_del(&ac->list);
2468 ac->sched = false;
2469
2470 list_for_each_entry_safe(tid,
2471 tid_tmp, &ac->tid_q, list) {
2472 list_del(&tid->list);
2473 tid->sched = false;
b5aa9bf9 2474 ath_tid_drain(sc, txq, tid);
a37c2c79 2475 tid->state &= ~AGGR_ADDBA_COMPLETE;
a37c2c79 2476 tid->state &= ~AGGR_CLEANUP;
f078f209
LR
2477 }
2478 }
2479
a9f042cb 2480 spin_unlock_bh(&txq->axq_lock);
f078f209
LR
2481 }
2482 }
2483}