/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"

#define SKB_CB_ATHBUF(__skb)    (*((struct ath_buf **)__skb->cb))

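/*
 * Map a received frame to the (virtual) wiphy it was addressed to.
 * With ath9k virtual wiphy support a single radio backs several
 * ieee80211_hw instances; we match addr1 against each secondary
 * wiphy's permanent address and fall back to the primary wiphy.
 */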
static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
                                             struct ieee80211_hdr *hdr)
{
        struct ieee80211_hw *hw = sc->pri_wiphy->hw;
        int i;

        spin_lock_bh(&sc->wiphy_lock);
        for (i = 0; i < sc->num_sec_wiphy; i++) {
                struct ath_wiphy *aphy = sc->sec_wiphy[i];
                if (aphy == NULL)
                        continue;
                if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr)
                    == 0) {
                        hw = aphy->hw;
                        break;
                }
        }
        spin_unlock_bh(&sc->wiphy_lock);
        return hw;
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_desc *ds;
        struct sk_buff *skb;

        ATH_RXBUF_RESET(bf);

        ds = bf->bf_desc;
        ds->ds_link = 0; /* link to null */
        ds->ds_data = bf->bf_buf_addr;

        /* virtual addr of the beginning of the buffer. */
        skb = bf->bf_mpdu;
        BUG_ON(skb == NULL);
        ds->ds_vdata = skb->data;

        /*
         * setup rx descriptors. The rx_bufsize here tells the hardware
         * how much data it can DMA to us and that we are prepared
         * to process
         */
        ath9k_hw_setuprxdesc(ah, ds,
                             common->rx_bufsize,
                             0);

        if (sc->rx.rxlink == NULL)
                ath9k_hw_putrxbuf(ah, bf->bf_daddr);
        else
                *sc->rx.rxlink = bf->bf_daddr;

        sc->rx.rxlink = &ds->ds_link;
        ath9k_hw_rxena(ah);
}

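/*
 * Make the given antenna the new default and restart the antenna
 * diversity vote; ath_rx_tasklet() calls this once the rx path has
 * preferred the other antenna three times in a row.
 */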
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
        /* XXX block beacon interrupts */
        ath9k_hw_setantenna(sc->sc_ah, antenna);
        sc->rx.defant = antenna;
        sc->rx.rxotherant = 0;
}

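/*
 * Re-program the PCU for the current operating mode: rx filter,
 * BSSID mask (if the hardware supports it), opmode, station MAC
 * address and a fully open multicast filter.
 */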
static void ath_opmode_init(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);

        u32 rfilt, mfilt[2];

        /* configure rx filter */
        rfilt = ath_calcrxfilter(sc);
        ath9k_hw_setrxfilter(ah, rfilt);

        /* configure bssid mask */
        if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
                ath_hw_setbssidmask(common);

        /* configure operational mode */
        ath9k_hw_setopmode(ah);

        /* Handle any link-level address change. */
        ath9k_hw_setmac(ah, common->macaddr);

        /* calculate and install multicast filter */
        mfilt[0] = mfilt[1] = ~0;
        ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

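/*
 * Push one free buffer from sc->rx.rxbuf into the hardware rx FIFO of
 * the given queue. The status area at the front of the buffer is
 * zeroed (and synced back to the device) first, so stale status words
 * cannot be mistaken for a completed descriptor. Returns false once
 * the FIFO has reached its hardware depth.
 */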
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
                                 enum ath9k_rx_qtype qtype)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_rx_edma *rx_edma;
        struct sk_buff *skb;
        struct ath_buf *bf;

        rx_edma = &sc->rx.rx_edma[qtype];
        if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
                return false;

        bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
        list_del_init(&bf->list);

        skb = bf->bf_mpdu;

        ATH_RXBUF_RESET(bf);
        memset(skb->data, 0, ah->caps.rx_status_len);
        dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
                                   ah->caps.rx_status_len, DMA_TO_DEVICE);

        SKB_CB_ATHBUF(skb) = bf;
        ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
        skb_queue_tail(&rx_edma->rx_fifo, skb);

        return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
                                  enum ath9k_rx_qtype qtype, int size)
{
        struct ath_rx_edma *rx_edma;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        u32 nbuf = 0;

        rx_edma = &sc->rx.rx_edma[qtype];
        if (list_empty(&sc->rx.rxbuf)) {
                ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
                return;
        }

        while (!list_empty(&sc->rx.rxbuf)) {
                nbuf++;

                if (!ath_rx_edma_buf_link(sc, qtype))
                        break;

                if (nbuf >= size)
                        break;
        }
}

static void ath_rx_remove_buffer(struct ath_softc *sc,
                                 enum ath9k_rx_qtype qtype)
{
        struct ath_buf *bf;
        struct ath_rx_edma *rx_edma;
        struct sk_buff *skb;

        rx_edma = &sc->rx.rx_edma[qtype];

        while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
                bf = SKB_CB_ATHBUF(skb);
                BUG_ON(!bf);
                list_add_tail(&bf->list, &sc->rx.rxbuf);
        }
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
        struct ath_buf *bf;

        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

        list_for_each_entry(bf, &sc->rx.rxbuf, list) {
                if (bf->bf_mpdu)
                        dev_kfree_skb_any(bf->bf_mpdu);
        }

        INIT_LIST_HEAD(&sc->rx.rxbuf);

        kfree(sc->rx.rx_bufptr);
        sc->rx.rx_bufptr = NULL;
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
        skb_queue_head_init(&rx_edma->rx_fifo);
        skb_queue_head_init(&rx_edma->rx_buffers);
        rx_edma->rx_fifo_hwsize = size;
}

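/*
 * Buffer sizing below: each rx buffer must hold a maximum-size MPDU
 * plus the in-band DMA status words, rounded up to the alignment unit
 * min(cachelsz, 64). As an illustration only (these numbers are not
 * taken from the headers): a 3839-byte max MPDU, a 16-byte status
 * area and a 128-byte cache line would give
 * roundup(3839 + 16, 64) = 3904 bytes per buffer.
 */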
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_hw *ah = sc->sc_ah;
        struct sk_buff *skb;
        struct ath_buf *bf;
        int error = 0, i;
        u32 size;

        common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
                                     ah->caps.rx_status_len,
                                     min(common->cachelsz, (u16)64));

        ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
                                    ah->caps.rx_status_len);

        ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
                               ah->caps.rx_lp_qdepth);
        ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
                               ah->caps.rx_hp_qdepth);

        size = sizeof(struct ath_buf) * nbufs;
        bf = kzalloc(size, GFP_KERNEL);
        if (!bf)
                return -ENOMEM;

        INIT_LIST_HEAD(&sc->rx.rxbuf);
        sc->rx.rx_bufptr = bf;

        for (i = 0; i < nbufs; i++, bf++) {
                skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
                if (!skb) {
                        error = -ENOMEM;
                        goto rx_init_fail;
                }

                memset(skb->data, 0, common->rx_bufsize);
                bf->bf_mpdu = skb;

                bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
                                                 common->rx_bufsize,
                                                 DMA_BIDIRECTIONAL);
                if (unlikely(dma_mapping_error(sc->dev,
                                               bf->bf_buf_addr))) {
                        dev_kfree_skb_any(skb);
                        bf->bf_mpdu = NULL;
                        ath_print(common, ATH_DBG_FATAL,
                                  "dma_mapping_error() on RX init\n");
                        error = -ENOMEM;
                        goto rx_init_fail;
                }

                list_add_tail(&bf->list, &sc->rx.rxbuf);
        }

        return 0;

rx_init_fail:
        ath_rx_edma_cleanup(sc);
        return error;
}

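/*
 * Prime both hardware rx FIFOs (HP first, then LP) to their full
 * advertised depth, then program the PCU and start receive.
 */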
static void ath_edma_start_recv(struct ath_softc *sc)
{
        spin_lock_bh(&sc->rx.rxbuflock);

        ath9k_hw_rxena(sc->sc_ah);

        ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
                              sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

        ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
                              sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

        spin_unlock_bh(&sc->rx.rxbuflock);

        ath_opmode_init(sc);

        ath9k_hw_startpcureceive(sc->sc_ah);
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
        spin_lock_bh(&sc->rx.rxbuflock);
        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
        spin_unlock_bh(&sc->rx.rxbuflock);
}

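/*
 * Allocate and set up the rx path. EDMA capable chips (the AR9003
 * family) keep their rx status in-band ahead of the frame and are
 * handled by ath_rx_edma_init(); older chips get a classic descriptor
 * ring via ath_descdma_setup() with one DMA-mapped skb per descriptor.
 */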
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct sk_buff *skb;
        struct ath_buf *bf;
        int error = 0;

        spin_lock_init(&sc->rx.rxflushlock);
        sc->sc_flags &= ~SC_OP_RXFLUSH;
        spin_lock_init(&sc->rx.rxbuflock);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                return ath_rx_edma_init(sc, nbufs);
        } else {
                common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
                                             min(common->cachelsz, (u16)64));

                ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
                          common->cachelsz, common->rx_bufsize);

                /* Initialize rx descriptors */

                error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
                                          "rx", nbufs, 1);
                if (error != 0) {
                        ath_print(common, ATH_DBG_FATAL,
                                  "failed to allocate rx descriptors: %d\n",
                                  error);
                        goto err;
                }

                list_for_each_entry(bf, &sc->rx.rxbuf, list) {
                        skb = ath_rxbuf_alloc(common, common->rx_bufsize,
                                              GFP_KERNEL);
                        if (skb == NULL) {
                                error = -ENOMEM;
                                goto err;
                        }

                        bf->bf_mpdu = skb;
                        bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
                                                         common->rx_bufsize,
                                                         DMA_FROM_DEVICE);
                        if (unlikely(dma_mapping_error(sc->dev,
                                                       bf->bf_buf_addr))) {
                                dev_kfree_skb_any(skb);
                                bf->bf_mpdu = NULL;
                                ath_print(common, ATH_DBG_FATAL,
                                          "dma_mapping_error() on RX init\n");
                                error = -ENOMEM;
                                goto err;
                        }
                        bf->bf_dmacontext = bf->bf_buf_addr;
                }
                sc->rx.rxlink = NULL;
        }

err:
        if (error)
                ath_rx_cleanup(sc);

        return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct sk_buff *skb;
        struct ath_buf *bf;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                ath_rx_edma_cleanup(sc);
                return;
        } else {
                list_for_each_entry(bf, &sc->rx.rxbuf, list) {
                        skb = bf->bf_mpdu;
                        if (skb) {
                                dma_unmap_single(sc->dev, bf->bf_buf_addr,
                                                 common->rx_bufsize,
                                                 DMA_FROM_DEVICE);
                                dev_kfree_skb(skb);
                        }
                }

                if (sc->rx.rxdma.dd_desc_len != 0)
                        ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
        }
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

        u32 rfilt;

        rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
                | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
                | ATH9K_RX_FILTER_MCAST;

        /* If not a STA, enable processing of Probe Requests */
        if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
                rfilt |= ATH9K_RX_FILTER_PROBEREQ;

        /*
         * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
         * mode interface or when in monitor mode. AP mode does not need this
         * since it receives all in-BSS frames anyway.
         */
        if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
             (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
            (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR))
                rfilt |= ATH9K_RX_FILTER_PROM;

        if (sc->rx.rxfilter & FIF_CONTROL)
                rfilt |= ATH9K_RX_FILTER_CONTROL;

        if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
            !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
                rfilt |= ATH9K_RX_FILTER_MYBEACON;
        else
                rfilt |= ATH9K_RX_FILTER_BEACON;

        if ((AR_SREV_9280_10_OR_LATER(sc->sc_ah) ||
             AR_SREV_9285_10_OR_LATER(sc->sc_ah)) &&
            (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
            (sc->rx.rxfilter & FIF_PSPOLL))
                rfilt |= ATH9K_RX_FILTER_PSPOLL;

        if (conf_is_ht(&sc->hw->conf))
                rfilt |= ATH9K_RX_FILTER_COMP_BAR;

        if (sc->sec_wiphy || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
                /* TODO: only needed if more than one BSSID is in use in
                 * station/adhoc mode */
                /* The following may also be needed for other older chips */
                if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
                        rfilt |= ATH9K_RX_FILTER_PROM;
                rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
        }

        return rfilt;

#undef RX_FILTER_PRESERVE
}

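/*
 * (Re)start receive. On non-EDMA chips the descriptor chain is
 * relinked from scratch (ath_rx_buf_link() may drop buffers, hence
 * the second emptiness check), the head of the list is handed to the
 * hardware, and finally the PCU is programmed and started.
 */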
int ath_startrecv(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_buf *bf, *tbf;

        if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                ath_edma_start_recv(sc);
                return 0;
        }

        spin_lock_bh(&sc->rx.rxbuflock);
        if (list_empty(&sc->rx.rxbuf))
                goto start_recv;

        sc->rx.rxlink = NULL;
        list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
                ath_rx_buf_link(sc, bf);
        }

        /* We could have deleted elements so the list may be empty now */
        if (list_empty(&sc->rx.rxbuf))
                goto start_recv;

        bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
        ath9k_hw_putrxbuf(ah, bf->bf_daddr);
        ath9k_hw_rxena(ah);

start_recv:
        spin_unlock_bh(&sc->rx.rxbuflock);
        ath_opmode_init(sc);
        ath9k_hw_startpcureceive(ah);

        return 0;
}

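/*
 * Stop receive: disable the PCU first, clear the rx filter, then stop
 * rx DMA. The return value reports whether the DMA engine actually
 * came to a halt.
 */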
bool ath_stoprecv(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        bool stopped;

        ath9k_hw_stoppcurecv(ah);
        ath9k_hw_setrxfilter(ah, 0);
        stopped = ath9k_hw_stopdmarecv(ah);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                ath_edma_stop_recv(sc);
        else
                sc->rx.rxlink = NULL;

        return stopped;
}

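/*
 * Drain all pending rx buffers. SC_OP_RXFLUSH makes the rx tasklet
 * bail out of interrupt-driven processing while ath_rx_tasklet() is
 * called here with flush == 1 to requeue completed buffers unread;
 * EDMA chips drain the HP queue as well as the LP queue.
 */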
void ath_flushrecv(struct ath_softc *sc)
{
        spin_lock_bh(&sc->rx.rxflushlock);
        sc->sc_flags |= SC_OP_RXFLUSH;
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                ath_rx_tasklet(sc, 1, true);
        ath_rx_tasklet(sc, 1, false);
        sc->sc_flags &= ~SC_OP_RXFLUSH;
        spin_unlock_bh(&sc->rx.rxflushlock);
}

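/*
 * Walk the beacon's information elements looking for the TIM IE. On a
 * DTIM beacon (dtim_count == 0), bit 0 of bitmap_ctrl advertises
 * buffered broadcast/multicast traffic that the AP will deliver as
 * CAB frames right after the beacon.
 */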
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
        /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
        struct ieee80211_mgmt *mgmt;
        u8 *pos, *end, id, elen;
        struct ieee80211_tim_ie *tim;

        mgmt = (struct ieee80211_mgmt *)skb->data;
        pos = mgmt->u.beacon.variable;
        end = skb->data + skb->len;

        while (pos + 2 < end) {
                id = *pos++;
                elen = *pos++;
                if (pos + elen > end)
                        break;

                if (id == WLAN_EID_TIM) {
                        if (elen < sizeof(*tim))
                                break;
                        tim = (struct ieee80211_tim_ie *) pos;
                        if (tim->dtim_count != 0)
                                break;
                        return tim->bitmap_ctrl & 0x01;
                }

                pos += elen;
        }

        return false;
}

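/*
 * Handle a beacon from our AP while in powersave. The 24 + 8 + 2 + 2
 * length check below covers the 802.11 header plus the fixed
 * timestamp, beacon interval and capability fields, i.e. the shortest
 * beacon that can carry information elements.
 */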
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
        struct ieee80211_mgmt *mgmt;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        if (skb->len < 24 + 8 + 2 + 2)
                return;

        mgmt = (struct ieee80211_mgmt *)skb->data;
        if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
                return; /* not from our current AP */

        sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

        if (sc->ps_flags & PS_BEACON_SYNC) {
                sc->ps_flags &= ~PS_BEACON_SYNC;
                ath_print(common, ATH_DBG_PS,
                          "Reconfigure Beacon timers based on "
                          "timestamp from the AP\n");
                ath_beacon_config(sc, NULL);
        }

        if (ath_beacon_dtim_pending_cab(skb)) {
                /*
                 * Remain awake waiting for buffered broadcast/multicast
                 * frames. If the last broadcast/multicast frame is not
                 * received properly, the next beacon frame will work as
                 * a backup trigger for returning into NETWORK SLEEP state,
                 * so we are waiting for it as well.
                 */
                ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
                          "buffered broadcast/multicast frame(s)\n");
                sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
                return;
        }

        if (sc->ps_flags & PS_WAIT_FOR_CAB) {
                /*
                 * This can happen if a broadcast frame is dropped or the AP
                 * fails to send a frame indicating that all CAB frames have
                 * been delivered.
                 */
                sc->ps_flags &= ~PS_WAIT_FOR_CAB;
                ath_print(common, ATH_DBG_PS,
                          "PS wait for CAB frames timed out\n");
        }
}

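/*
 * Powersave rx state machine: beacons feed ath_rx_ps_beacon(), the
 * final CAB frame (multicast data without the MoreData bit) clears
 * PS_WAIT_FOR_CAB, and a unicast, unfragmented frame completes an
 * outstanding PS-Poll exchange.
 */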
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        hdr = (struct ieee80211_hdr *)skb->data;

        /* Process Beacon and CAB receive in PS state */
        if ((sc->ps_flags & PS_WAIT_FOR_BEACON) &&
            ieee80211_is_beacon(hdr->frame_control))
                ath_rx_ps_beacon(sc, skb);
        else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
                 (ieee80211_is_data(hdr->frame_control) ||
                  ieee80211_is_action(hdr->frame_control)) &&
                 is_multicast_ether_addr(hdr->addr1) &&
                 !ieee80211_has_moredata(hdr->frame_control)) {
                /*
                 * No more broadcast/multicast frames to be received at this
                 * point.
                 */
                sc->ps_flags &= ~PS_WAIT_FOR_CAB;
                ath_print(common, ATH_DBG_PS,
                          "All PS CAB frames received, back to sleep\n");
        } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
                   !is_multicast_ether_addr(hdr->addr1) &&
                   !ieee80211_has_morefrags(hdr->frame_control)) {
                sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
                ath_print(common, ATH_DBG_PS,
                          "Going back to sleep after having received "
                          "PS-Poll data (0x%lx)\n",
                          sc->ps_flags & (PS_WAIT_FOR_BEACON |
                                          PS_WAIT_FOR_CAB |
                                          PS_WAIT_FOR_PSPOLL_DATA |
                                          PS_WAIT_FOR_TX_ACK));
        }
}

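/*
 * Hand a completed frame to mac80211. Unicast frames go only to the
 * hw chosen by ath_get_virt_hw(); broadcast/multicast frames are
 * copied to every active secondary wiphy and the original skb is
 * delivered on the primary hw.
 */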
static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
                                    struct ath_softc *sc, struct sk_buff *skb,
                                    struct ieee80211_rx_status *rxs)
{
        struct ieee80211_hdr *hdr;

        hdr = (struct ieee80211_hdr *)skb->data;

        /* Send the frame to mac80211 */
        if (is_multicast_ether_addr(hdr->addr1)) {
                int i;
                /*
                 * Deliver broadcast/multicast frames to all suitable
                 * virtual wiphys.
                 */
                /* TODO: filter based on channel configuration */
                for (i = 0; i < sc->num_sec_wiphy; i++) {
                        struct ath_wiphy *aphy = sc->sec_wiphy[i];
                        struct sk_buff *nskb;
                        if (aphy == NULL)
                                continue;
                        nskb = skb_copy(skb, GFP_ATOMIC);
                        if (!nskb)
                                continue;
                        ieee80211_rx(aphy->hw, nskb);
                }
                ieee80211_rx(sc->hw, skb);
        } else
                /* Deliver unicast frames based on receiver address */
                ieee80211_rx(hw, skb);
}

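/*
 * Drain completed buffers from the hardware rx FIFO into the
 * rx_buffers queue. -EINPROGRESS means the head of the FIFO is still
 * owned by hardware; -EINVAL marks a corrupt descriptor, in which
 * case this buffer and the one following it are recycled unread.
 */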
static bool ath_edma_get_buffers(struct ath_softc *sc,
                                 enum ath9k_rx_qtype qtype)
{
        struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct sk_buff *skb;
        struct ath_buf *bf;
        int ret;

        skb = skb_peek(&rx_edma->rx_fifo);
        if (!skb)
                return false;

        bf = SKB_CB_ATHBUF(skb);
        BUG_ON(!bf);

        /* sync the status words for CPU access before parsing them */
        dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
                                common->rx_bufsize, DMA_FROM_DEVICE);

        ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
        if (ret == -EINPROGRESS)
                return false;

        __skb_unlink(skb, &rx_edma->rx_fifo);
        if (ret == -EINVAL) {
                /* corrupt descriptor, skip this one and the following one */
                list_add_tail(&bf->list, &sc->rx.rxbuf);
                ath_rx_edma_buf_link(sc, qtype);
                skb = skb_peek(&rx_edma->rx_fifo);
                if (!skb)
                        return true;

                bf = SKB_CB_ATHBUF(skb);
                BUG_ON(!bf);

                __skb_unlink(skb, &rx_edma->rx_fifo);
                list_add_tail(&bf->list, &sc->rx.rxbuf);
                ath_rx_edma_buf_link(sc, qtype);
        }
        skb_queue_tail(&rx_edma->rx_buffers, skb);

        return true;
}

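/*
 * Drain everything the hardware has finished with, then pop one frame
 * off the rx_buffers queue and re-run descriptor processing to fill
 * in the caller's ath_rx_status.
 */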
static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
                                                struct ath_rx_status *rs,
                                                enum ath9k_rx_qtype qtype)
{
        struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
        struct sk_buff *skb;
        struct ath_buf *bf;

        while (ath_edma_get_buffers(sc, qtype));
        skb = __skb_dequeue(&rx_edma->rx_buffers);
        if (!skb)
                return NULL;

        bf = SKB_CB_ATHBUF(skb);
        ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
        return bf;
}

static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
                                           struct ath_rx_status *rs)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_desc *ds;
        struct ath_buf *bf;
        int ret;

        if (list_empty(&sc->rx.rxbuf)) {
                sc->rx.rxlink = NULL;
                return NULL;
        }

        bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
        ds = bf->bf_desc;

        /*
         * Must provide the virtual address of the current
         * descriptor, the physical address, and the virtual
         * address of the next descriptor in the h/w chain.
         * This allows the HAL to look ahead to see if the
         * hardware is done with a descriptor by checking the
         * done bit in the following descriptor and the address
         * of the current descriptor the DMA engine is working
         * on. All this is necessary because of our use of
         * a self-linked list to avoid rx overruns.
         */
        ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
        if (ret == -EINPROGRESS) {
                struct ath_rx_status trs;
                struct ath_buf *tbf;
                struct ath_desc *tds;

                memset(&trs, 0, sizeof(trs));
                if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
                        sc->rx.rxlink = NULL;
                        return NULL;
                }

                tbf = list_entry(bf->list.next, struct ath_buf, list);

                /*
                 * On some hardware the descriptor status words could
                 * get corrupted, including the done bit. Because of
                 * this, check if the next descriptor's done bit is
                 * set or not.
                 *
                 * If the next descriptor's done bit is set, the current
                 * descriptor has been corrupted. Force s/w to discard
                 * this descriptor and continue...
                 */

                tds = tbf->bf_desc;
                ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
                if (ret == -EINPROGRESS)
                        return NULL;
        }

        if (!bf->bf_mpdu)
                return bf;

        /*
         * Synchronize the DMA transfer with CPU before
         * 1. accessing the frame
         * 2. requeueing the same buffer to h/w
         */
        dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
                                common->rx_bufsize,
                                DMA_FROM_DEVICE);

        return bf;
}

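/*
 * Main rx processing loop, run from the rx tasklet with rxbuflock
 * held. With flush != 0 every completed buffer is chained straight
 * back to the hardware without being processed, which is how
 * ath_flushrecv() empties the queue.
 */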
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
        struct ath_buf *bf;
        struct sk_buff *skb = NULL, *requeue_skb;
        struct ieee80211_rx_status *rxs;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        /*
         * The hw can technically differ from common->hw when using ath9k
         * virtual wiphy so to account for that we iterate over the active
         * wiphys and find the appropriate wiphy and therefore hw.
         */
        struct ieee80211_hw *hw = NULL;
        struct ieee80211_hdr *hdr;
        int retval;
        bool decrypt_error = false;
        struct ath_rx_status rs;
        enum ath9k_rx_qtype qtype;
        bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
        int dma_type;

        if (edma)
                dma_type = DMA_FROM_DEVICE;
        else
                dma_type = DMA_BIDIRECTIONAL;

        qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
        spin_lock_bh(&sc->rx.rxbuflock);

        do {
                /* If handling rx interrupt and flush is in progress => exit */
                if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
                        break;

                memset(&rs, 0, sizeof(rs));
                if (edma)
                        bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
                else
                        bf = ath_get_next_rx_buf(sc, &rs);

                if (!bf)
                        break;

                skb = bf->bf_mpdu;
                if (!skb)
                        continue;

                hdr = (struct ieee80211_hdr *) skb->data;
                rxs = IEEE80211_SKB_RXCB(skb);

                hw = ath_get_virt_hw(sc, hdr);

                ath_debug_stat_rx(sc, &rs);

                /*
                 * If we're asked to flush receive queue, directly
                 * chain it back at the queue without processing it.
                 */
                if (flush)
                        goto requeue;

                retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, &rs,
                                                     rxs, &decrypt_error);
                if (retval)
                        goto requeue;

                /* Ensure we always have an skb to requeue once we are done
                 * processing the current buffer's skb */
                requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

                /* If there is no memory we ignore the current RX'd frame,
                 * tell hardware it can give us a new frame using the old
                 * skb and put it at the tail of the sc->rx.rxbuf list for
                 * processing. */
                if (!requeue_skb)
                        goto requeue;

                /* Unmap the frame */
                dma_unmap_single(sc->dev, bf->bf_buf_addr,
                                 common->rx_bufsize,
                                 dma_type);

                skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
                if (ah->caps.rx_status_len)
                        skb_pull(skb, ah->caps.rx_status_len);

                ath9k_cmn_rx_skb_postprocess(common, skb, &rs,
                                             rxs, decrypt_error);

                /* We will now give hardware our shiny new allocated skb */
                bf->bf_mpdu = requeue_skb;
                bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
                                                 common->rx_bufsize,
                                                 dma_type);
                if (unlikely(dma_mapping_error(sc->dev,
                                               bf->bf_buf_addr))) {
                        dev_kfree_skb_any(requeue_skb);
                        bf->bf_mpdu = NULL;
                        ath_print(common, ATH_DBG_FATAL,
                                  "dma_mapping_error() on RX\n");
                        ath_rx_send_to_mac80211(hw, sc, skb, rxs);
                        break;
                }
                bf->bf_dmacontext = bf->bf_buf_addr;

                /*
                 * change the default rx antenna if rx diversity chooses the
                 * other antenna 3 times in a row.
                 */
                if (sc->rx.defant != rs.rs_antenna) {
                        if (++sc->rx.rxotherant >= 3)
                                ath_setdefantenna(sc, rs.rs_antenna);
                } else {
                        sc->rx.rxotherant = 0;
                }

                if (unlikely(sc->ps_flags & (PS_WAIT_FOR_BEACON |
                                             PS_WAIT_FOR_CAB |
                                             PS_WAIT_FOR_PSPOLL_DATA)))
                        ath_rx_ps(sc, skb);

                ath_rx_send_to_mac80211(hw, sc, skb, rxs);

requeue:
                if (edma) {
                        list_add_tail(&bf->list, &sc->rx.rxbuf);
                        ath_rx_edma_buf_link(sc, qtype);
                } else {
                        list_move_tail(&bf->list, &sc->rx.rxbuf);
                        ath_rx_buf_link(sc, bf);
                }
        } while (1);

        spin_unlock_bh(&sc->rx.rxbuflock);

        return 0;
}