]> bbs.cooldavid.org Git - net-next-2.6.git/blame - net/mac80211/agg-tx.c
mac80211: defer TX agg session teardown to work
[net-next-2.6.git] / net / mac80211 / agg-tx.c
CommitLineData
b8695a8f
JB
1/*
2 * HT handling
3 *
4 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
5 * Copyright 2002-2005, Instant802 Networks, Inc.
6 * Copyright 2005-2006, Devicescape Software, Inc.
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2007-2009, Intel Corporation
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/ieee80211.h>
5a0e3ad6 17#include <linux/slab.h>
b8695a8f
JB
18#include <net/mac80211.h>
19#include "ieee80211_i.h"
24487981 20#include "driver-ops.h"
b8695a8f
JB
21#include "wme.h"
22
86ab6c5a
JB
23/**
24 * DOC: TX aggregation
25 *
26 * Aggregation on the TX side requires setting the hardware flag
27 * %IEEE80211_HW_AMPDU_AGGREGATION as well as, if present, the @ampdu_queues
28 * hardware parameter to the number of hardware AMPDU queues. If there are no
29 * hardware queues then the driver will (currently) have to do all frame
30 * buffering.
31 *
32 * When TX aggregation is started by some subsystem (usually the rate control
33 * algorithm would be appropriate) by calling the
34 * ieee80211_start_tx_ba_session() function, the driver will be notified via
35 * its @ampdu_action function, with the %IEEE80211_AMPDU_TX_START action.
36 *
37 * In response to that, the driver is later required to call the
38 * ieee80211_start_tx_ba_cb() (or ieee80211_start_tx_ba_cb_irqsafe())
39 * function, which will start the aggregation session.
40 *
41 * Similarly, when the aggregation session is stopped by
42 * ieee80211_stop_tx_ba_session(), the driver's @ampdu_action function will
43 * be called with the action %IEEE80211_AMPDU_TX_STOP. In this case, the
44 * call must not fail, and the driver must later call ieee80211_stop_tx_ba_cb()
45 * (or ieee80211_stop_tx_ba_cb_irqsafe()).
46 */
47
b8695a8f
JB
/*
 * Build and transmit an ADDBA request action frame to @da for @tid.
 *
 * @sdata: interface to send on
 * @da: destination (recipient STA) address
 * @tid: TID the block-ack agreement is requested for
 * @dialog_token: token matching this request to the later response
 * @start_seq_num: starting sequence number for the session
 * @agg_size: requested maximum A-MPDU buffer size
 * @timeout: block-ack timeout value, in TUs per the ADDBA frame format
 *
 * On allocation failure the frame is silently dropped (only logged);
 * the addBA response timer is expected to catch the lost exchange.
 */
static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
					 const u8 *da, u16 tid,
					 u8 dialog_token, u16 start_seq_num,
					 u16 agg_size, u16 timeout)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u16 capab;

	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);

	if (!skb) {
		printk(KERN_ERR "%s: failed to allocate buffer "
				"for addba request frame\n", sdata->name);
		return;
	}
	skb_reserve(skb, local->hw.extra_tx_headroom);
	/* 24 bytes = basic management frame header (no HT control/QoS) */
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
	memset(mgmt, 0, 24);
	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	/* BSSID depends on our role: own address when AP side, else the AP's */
	if (sdata->vif.type == NL80211_IFTYPE_AP ||
	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	else if (sdata->vif.type == NL80211_IFTYPE_STATION)
		memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);

	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	/* 1 byte category + fixed ADDBA request body */
	skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));

	mgmt->u.action.category = WLAN_CATEGORY_BACK;
	mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;

	mgmt->u.action.u.addba_req.dialog_token = dialog_token;
	capab = (u16)(1 << 1);		/* bit 1 aggregation policy */
	capab |= (u16)(tid << 2);	/* bit 5:2 TID number */
	capab |= (u16)(agg_size << 6);	/* bit 15:6 max size of aggregation */

	mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);

	mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);
	/* sequence number occupies bits 15:4 of the SSN field */
	mgmt->u.action.u.addba_req.start_seq_num =
					cpu_to_le16(start_seq_num << 4);

	ieee80211_tx_skb(sdata, skb);
}
97
98void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn)
99{
100 struct ieee80211_local *local = sdata->local;
101 struct sk_buff *skb;
102 struct ieee80211_bar *bar;
103 u16 bar_control = 0;
104
105 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
106 if (!skb) {
107 printk(KERN_ERR "%s: failed to allocate buffer for "
47846c9b 108 "bar frame\n", sdata->name);
b8695a8f
JB
109 return;
110 }
111 skb_reserve(skb, local->hw.extra_tx_headroom);
112 bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
113 memset(bar, 0, sizeof(*bar));
114 bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
115 IEEE80211_STYPE_BACK_REQ);
116 memcpy(bar->ra, ra, ETH_ALEN);
47846c9b 117 memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
b8695a8f
JB
118 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
119 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
120 bar_control |= (u16)(tid << 12);
121 bar->control = cpu_to_le16(bar_control);
122 bar->start_seq_num = cpu_to_le16(ssn);
123
62ae67be
JB
124 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
125 ieee80211_tx_skb(sdata, skb);
b8695a8f
JB
126}
127
a622ab72
JB
128static void kfree_tid_tx(struct rcu_head *rcu_head)
129{
130 struct tid_ampdu_tx *tid_tx =
131 container_of(rcu_head, struct tid_ampdu_tx, rcu_head);
132
133 kfree(tid_tx);
134}
135
67c282c0
JB
/*
 * Core TX BA session teardown; caller must hold sta->lock.
 *
 * @sta: station the session belongs to
 * @tid: TID of the session
 * @initiator: which side initiated the stop (recorded for the later
 *	stop callback to decide whether to send a DELBA)
 *
 * If the session was requested but never actually started (WANT_START
 * still set), the state is simply torn down and freed via RCU.
 * Otherwise the session is marked STOPPING, taken out of the lock-free
 * TX fast path by clearing OPERATIONAL, and the driver is told to stop.
 *
 * Returns 0 on success, -ENOENT if no session exists, or the driver's
 * (unexpected) error code.
 */
int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				    enum ieee80211_back_parties initiator)
{
	struct ieee80211_local *local = sta->local;
	struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid];
	int ret;

	lockdep_assert_held(&sta->lock);

	if (WARN_ON(!tid_tx))
		return -ENOENT;

	if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
		/* not even started yet! */
		rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
		call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
		return 0;
	}

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
	       sta->sta.addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);

	/*
	 * After this packets are no longer handed right through
	 * to the driver but are put onto tid_tx->pending instead,
	 * with locking to ensure proper access.
	 */
	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);

	tid_tx->stop_initiator = initiator;

	ret = drv_ampdu_action(local, sta->sdata,
			       IEEE80211_AMPDU_TX_STOP,
			       &sta->sta, tid, NULL);

	/* HW shall not deny going back to legacy */
	if (WARN_ON(ret)) {
		/*
		 * We may have pending packets get stuck in this case...
		 * Not bothering with a workaround for now.
		 */
	}

	return ret;
}
185
b8695a8f
JB
/*
 * After sending add Block Ack request we activated a timer until
 * add Block Ack response will arrive from the recipient.
 * If this timer expires sta_addba_resp_timer_expired will be executed.
 */
static void sta_addba_resp_timer_expired(unsigned long data)
{
	/* not an elegant detour, but there is no choice as the timer passes
	 * only one argument, and both sta_info and TID are needed, so init
	 * flow in sta_info_create gives the TID as data, while the timer_to_id
	 * array gives the sta through container_of */
	u16 tid = *(u8 *)data;
	struct sta_info *sta = container_of((void *)data,
					    struct sta_info, timer_to_tid[tid]);
	struct tid_ampdu_tx *tid_tx;

	/* check if the TID waits for addBA response */
	rcu_read_lock();
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (!tid_tx ||
	    test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
		/* session gone or response already arrived - nothing to do */
		rcu_read_unlock();
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "timer expired on tid %d but we are not "
				"(or no longer) expecting addBA response there\n",
			tid);
#endif
		return;
	}

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
#endif

	/* no response in time: tear the half-open session down again */
	ieee80211_stop_tx_ba_session(&sta->sta, tid);
	rcu_read_unlock();
}
223
96f5e66e
JB
224static inline int ieee80211_ac_from_tid(int tid)
225{
226 return ieee802_1d_to_ac[tid & 7];
227}
228
a6a67db2
JB
/*
 * When multiple aggregation sessions on multiple stations
 * are being created/destroyed simultaneously, we need to
 * refcount the global queue stop caused by that in order
 * to not get into a situation where one of the aggregation
 * setup or teardown re-enables queues before the other is
 * ready to handle that.
 *
 * These two functions take care of this issue by keeping
 * a global "agg_queue_stop" refcount.
 */
static void __acquires(agg_queue)
ieee80211_stop_queue_agg(struct ieee80211_local *local, int tid)
{
	int queue = ieee80211_ac_from_tid(tid);

	/* only the first stopper actually stops the queue */
	if (atomic_inc_return(&local->agg_queue_stop[queue]) == 1)
		ieee80211_stop_queue_by_reason(
			&local->hw, queue,
			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
	/* sparse annotation: pairs with __release in the wake function */
	__acquire(agg_queue);
}
251
/*
 * Counterpart of ieee80211_stop_queue_agg(): drop one reference on the
 * per-AC aggregation queue stop and wake the queue when it hits zero.
 */
static void __releases(agg_queue)
ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
{
	int queue = ieee80211_ac_from_tid(tid);

	/* only the last waker actually restarts the queue */
	if (atomic_dec_return(&local->agg_queue_stop[queue]) == 0)
		ieee80211_wake_queue_by_reason(
			&local->hw, queue,
			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
	__release(agg_queue);
}
263
/*
 * Continue a deferred TX BA session start: ask the driver to set up
 * aggregation for @tid and, on success, send the ADDBA request and arm
 * the response timer. Runs from the aggregation work (deferred from
 * ieee80211_start_tx_ba_session(), which set HT_AGG_STATE_WANT_START).
 */
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid];
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	u16 start_seq_num;
	int ret;

	/*
	 * While we're asking the driver about the aggregation,
	 * stop the AC queue so that we don't have to worry
	 * about frames that came in while we were doing that,
	 * which would require us to put them to the AC pending
	 * afterwards which just makes the code more complex.
	 */
	ieee80211_stop_queue_agg(local, tid);

	clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	/*
	 * This might be off by one due to a race that we can't
	 * really prevent here without synchronize_net() which
	 * can't be called now.
	 */
	start_seq_num = sta->tid_seq[tid] >> 4;

	ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
			       &sta->sta, tid, &start_seq_num);
	if (ret) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - HW unavailable for"
					" tid %d\n", tid);
#endif
		/* driver refused: unpublish and free the session via RCU */
		rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
		ieee80211_wake_queue_agg(local, tid);
		call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
		return;
	}

	/* we can take packets again now */
	ieee80211_wake_queue_agg(local, tid);

	/* activate the timer for the recipient's addBA response */
	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
#endif

	sta->ampdu_mlme.addba_req_num[tid]++;

	/* send AddBA request; 0x40 = requested buffer size, 5000 = timeout */
	ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
				     tid_tx->dialog_token, start_seq_num,
				     0x40, 5000);
}
319
/*
 * Request that a TX block-ack (aggregation) session be started with the
 * peer @pubsta on @tid.
 *
 * This only allocates and publishes the per-TID session state and marks
 * it HT_AGG_STATE_WANT_START; the actual driver setup and ADDBA request
 * happen later from the station's aggregation work item.
 *
 * Returns 0 if the start was queued, -EINVAL for unsupported
 * configurations, -EBUSY after too many failed attempts, -EAGAIN if a
 * session already exists on this TID, or -ENOMEM.
 */
int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_start_tx_ba_session(pubsta, tid);

	if (WARN_ON(!local->ops->ampdu_action))
		return -EINVAL;

	if ((tid >= STA_TID_NUM) ||
	    !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION))
		return -EINVAL;

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Open BA session requested for %pM tid %u\n",
	       pubsta->addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	/*
	 * The aggregation code is not prepared to handle
	 * anything but STA/AP due to the BSSID handling.
	 * IBSS could work in the code but isn't supported
	 * by drivers or the standard.
	 */
	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
	    sdata->vif.type != NL80211_IFTYPE_AP)
		return -EINVAL;

	if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA sessions blocked. "
		       "Denying BA session request\n");
#endif
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	/* we have tried too many times, receiver does not want A-MPDU */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	tid_tx = sta->ampdu_mlme.tid_tx[tid];
	/* check if the TID is not in aggregation flow already */
	if (tid_tx) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - session is not "
				 "idle on tid %u\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		ret = -EAGAIN;
		goto err_unlock_sta;
	}

	/* prepare A-MPDU MLME for Tx aggregation */
	tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
	if (!tid_tx) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		if (net_ratelimit())
			printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
			       tid);
#endif
		ret = -ENOMEM;
		goto err_unlock_sta;
	}

	skb_queue_head_init(&tid_tx->pending);
	__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	/* Tx timer */
	tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
	tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
	init_timer(&tid_tx->addba_resp_timer);

	/* assign a dialog token */
	sta->ampdu_mlme.dialog_token_allocator++;
	tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;

	/*
	 * finally, assign it to the array -- publishes the session to
	 * RCU readers, so all fields above must be set up first
	 */
	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);

	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

	/* this flow continues off the work */
 err_unlock_sta:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
415
cd8ffc80
JB
/*
 * splice packets from the STA's pending to the local pending,
 * requires a call to ieee80211_agg_splice_finish later
 */
static void __acquires(agg_queue)
ieee80211_agg_splice_packets(struct ieee80211_local *local,
			     struct tid_ampdu_tx *tid_tx, u16 tid)
{
	int queue = ieee80211_ac_from_tid(tid);
	unsigned long flags;

	/* queue stays stopped until ieee80211_agg_splice_finish() */
	ieee80211_stop_queue_agg(local, tid);

	if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
			  " from the pending queue\n", tid))
		return;

	if (!skb_queue_empty(&tid_tx->pending)) {
		/* irqsave: local->pending is also touched from TX completion */
		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
		/* copy over remaining packets */
		skb_queue_splice_tail_init(&tid_tx->pending,
					   &local->pending[queue]);
		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
	}
}
441
a6a67db2
JB
/*
 * Re-enable the AC queue stopped by ieee80211_agg_splice_packets();
 * kept as a separate function for the sparse __releases annotation.
 */
static void __releases(agg_queue)
ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
{
	ieee80211_wake_queue_agg(local, tid);
}
447
/*
 * Flip a fully negotiated session (driver ready + response received)
 * into the operational state and notify the driver.
 *
 * Splices any frames buffered on tid_tx->pending back into the normal
 * TX path before setting OPERATIONAL so ordering is preserved.
 *
 * caller must hold sta->lock
 */
static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
					 struct sta_info *sta, u16 tid)
{
	lockdep_assert_held(&sta->lock);

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid);
#endif

	ieee80211_agg_splice_packets(local, sta->ampdu_mlme.tid_tx[tid], tid);
	/*
	 * Now mark as operational. This will be visible
	 * in the TX path, and lets it go lock-free in
	 * the common case.
	 */
	set_bit(HT_AGG_STATE_OPERATIONAL, &sta->ampdu_mlme.tid_tx[tid]->state);
	ieee80211_agg_splice_finish(local, tid);

	drv_ampdu_action(local, sta->sdata,
			 IEEE80211_AMPDU_TX_OPERATIONAL,
			 &sta->sta, tid, NULL);
}
471
/*
 * Driver callback: the hardware finished IEEE80211_AMPDU_TX_START setup
 * for station @ra / @tid. Marks the session DRV_READY and, if the peer's
 * addBA response has already been received, makes it operational.
 *
 * May be called in process context; drivers in atomic context must use
 * ieee80211_start_tx_ba_cb_irqsafe() instead.
 */
void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_start_tx_ba_cb(sdata, ra, tid);

	if (tid >= STA_TID_NUM) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
		       tid, STA_TID_NUM);
#endif
		return;
	}

	rcu_read_lock();
	sta = sta_info_get(sdata, ra);
	if (!sta) {
		rcu_read_unlock();
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Could not find station: %pM\n", ra);
#endif
		return;
	}

	spin_lock_bh(&sta->lock);
	tid_tx = sta->ampdu_mlme.tid_tx[tid];

	if (WARN_ON(!tid_tx)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "addBA was not requested!\n");
#endif
		spin_unlock_bh(&sta->lock);
		rcu_read_unlock();
		return;
	}

	/* duplicate driver callback - warn and ignore */
	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
		goto out;

	/* both sides ready: go operational now */
	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
		ieee80211_agg_tx_operational(local, sta, tid);

 out:
	spin_unlock_bh(&sta->lock);
	rcu_read_unlock();
}
b8695a8f 521
c951ad35 522void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
86ab6c5a
JB
523 const u8 *ra, u16 tid)
524{
c951ad35
JB
525 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
526 struct ieee80211_local *local = sdata->local;
86ab6c5a
JB
527 struct ieee80211_ra_tid *ra_tid;
528 struct sk_buff *skb = dev_alloc_skb(0);
529
530 if (unlikely(!skb)) {
531#ifdef CONFIG_MAC80211_HT_DEBUG
532 if (net_ratelimit())
533 printk(KERN_WARNING "%s: Not enough memory, "
47846c9b 534 "dropping start BA session", sdata->name);
86ab6c5a
JB
535#endif
536 return;
537 }
538 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
539 memcpy(&ra_tid->ra, ra, ETH_ALEN);
540 ra_tid->tid = tid;
541
c1475ca9
JB
542 skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
543 skb_queue_tail(&sdata->skb_queue, skb);
544 ieee80211_queue_work(&local->hw, &sdata->work);
86ab6c5a
JB
545}
546EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
547
849b7967
JB
/*
 * Locked wrapper around ___ieee80211_stop_tx_ba_session(): takes
 * sta->lock and stops the TX BA session on @tid, if one exists.
 *
 * Returns 0 on success, -ENOENT if there is no session, or the inner
 * function's error code.
 */
int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				   enum ieee80211_back_parties initiator)
{
	struct tid_ampdu_tx *tid_tx;
	int ret;

	spin_lock_bh(&sta->lock);
	tid_tx = sta->ampdu_mlme.tid_tx[tid];

	if (!tid_tx) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator);

 unlock:
	spin_unlock_bh(&sta->lock);
	return ret;
}
b8695a8f 568
/*
 * Public API: request teardown of the TX BA session with @pubsta on
 * @tid. The actual teardown is deferred to the station's aggregation
 * work item; here we only set HT_AGG_STATE_WANT_STOP and queue the work.
 *
 * Returns 0 if the stop was queued (or already in progress), -EINVAL
 * for invalid TID / missing driver support, -ENOENT if no session.
 */
int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_stop_tx_ba_session(pubsta, tid);

	if (!local->ops->ampdu_action)
		return -EINVAL;

	if (tid >= STA_TID_NUM)
		return -EINVAL;

	spin_lock_bh(&sta->lock);
	tid_tx = sta->ampdu_mlme.tid_tx[tid];

	if (!tid_tx) {
		ret = -ENOENT;
		goto unlock;
	}

	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		/* already in progress stopping it */
		ret = 0;
		goto unlock;
	}

	set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state);
	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

 unlock:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
607
/*
 * Driver callback: the hardware finished IEEE80211_AMPDU_TX_STOP for
 * station @ra / @tid. Completes the teardown: sends a DELBA if we
 * initiated the stop, drains any frames still buffered on the session,
 * unpublishes the tid_tx pointer and frees it via RCU.
 *
 * Only valid for a session already in the STOPPING state.
 */
void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_stop_tx_ba_cb(sdata, ra, tid);

	if (tid >= STA_TID_NUM) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
		       tid, STA_TID_NUM);
#endif
		return;
	}

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Stopping Tx BA session for %pM tid %d\n",
	       ra, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	rcu_read_lock();
	sta = sta_info_get(sdata, ra);
	if (!sta) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Could not find station: %pM\n", ra);
#endif
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&sta->lock);
	tid_tx = sta->ampdu_mlme.tid_tx[tid];

	if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
#endif
		spin_unlock_bh(&sta->lock);
		rcu_read_unlock();
		return;
	}

	if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR)
		ieee80211_send_delba(sta->sdata, ra, tid,
			WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);

	/*
	 * When we get here, the TX path will not be lockless any more wrt.
	 * aggregation, since the OPERATIONAL bit has long been cleared.
	 * Thus it will block on getting the lock, if it occurs. So if we
	 * stop the queue now, we will not get any more packets, and any
	 * that might be being processed will wait for us here, thereby
	 * guaranteeing that no packets go to the tid_tx pending queue any
	 * more.
	 */

	ieee80211_agg_splice_packets(local, tid_tx, tid);

	/* future packets must not find the tid_tx struct any more */
	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);

	ieee80211_agg_splice_finish(local, tid);

	call_rcu(&tid_tx->rcu_head, kfree_tid_tx);

	spin_unlock_bh(&sta->lock);
	rcu_read_unlock();
}
b8695a8f 678
/*
 * IRQ-safe variant of ieee80211_stop_tx_ba_cb(): packages the RA/TID
 * into a zero-length skb's control buffer and defers the real callback
 * to the interface work item.
 */
void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
				     const u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_ra_tid *ra_tid;
	/* dummy skb - only its cb carries the RA/TID payload */
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		if (net_ratelimit())
			printk(KERN_WARNING "%s: Not enough memory, "
			       "dropping stop BA session", sdata->name);
#endif
		return;
	}
	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
	skb_queue_tail(&sdata->skb_queue, skb);
	ieee80211_queue_work(&local->hw, &sdata->work);
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
704
86ab6c5a 705
b8695a8f
JB
/*
 * Handle a received ADDBA response action frame for @sta.
 *
 * Matches the response to the outstanding request by dialog token,
 * cancels the response timer, and either makes the session operational
 * (status SUCCESS, driver already ready) or tears it down again.
 *
 * NOTE(review): @len is currently not used to validate the frame
 * length before the fixed fields are read -- presumably the caller
 * has already checked it; verify against the action-frame RX path.
 */
void ieee80211_process_addba_resp(struct ieee80211_local *local,
				  struct sta_info *sta,
				  struct ieee80211_mgmt *mgmt,
				  size_t len)
{
	struct tid_ampdu_tx *tid_tx;
	u16 capab, tid;

	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
	/* TID sits in bits 5:2 of the capability field */
	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;

	spin_lock_bh(&sta->lock);

	tid_tx = sta->ampdu_mlme.tid_tx[tid];

	if (!tid_tx)
		goto out;

	if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
#endif
		goto out;
	}

	del_timer(&tid_tx->addba_resp_timer);

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
#endif

	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
			== WLAN_STATUS_SUCCESS) {
		if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
				     &tid_tx->state)) {
			/* ignore duplicate response */
			goto out;
		}

		/* driver already set up: session can go operational now */
		if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
			ieee80211_agg_tx_operational(local, sta, tid);

		/* peer accepted: reset the retry counter */
		sta->ampdu_mlme.addba_req_num[tid] = 0;
	} else {
		___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR);
	}

 out:
	spin_unlock_bh(&sta->lock);
}