]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/wireless/ath9k/recv.c
nl80211: Change max TX power to be in mBm instead of dBm
[net-next-2.6.git] / drivers / net / wireless / ath9k / recv.c
CommitLineData
f078f209
LR
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
f078f209
LR
17#include "core.h"
18
19/*
20 * Setup and link descriptors.
21 *
22 * 11N: we can no longer afford to self link the last descriptor.
23 * MAC acknowledges BA status as long as it copies frames to host
24 * buffer (or rx fifo). This can incorrectly acknowledge packets
25 * to a sender if last desc is self-linked.
f078f209 26 */
f078f209
LR
27static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
28{
29 struct ath_hal *ah = sc->sc_ah;
30 struct ath_desc *ds;
31 struct sk_buff *skb;
32
33 ATH_RXBUF_RESET(bf);
34
35 ds = bf->bf_desc;
be0418ad 36 ds->ds_link = 0; /* link to null */
f078f209
LR
37 ds->ds_data = bf->bf_buf_addr;
38
be0418ad 39 /* virtual addr of the beginning of the buffer. */
f078f209
LR
40 skb = bf->bf_mpdu;
41 ASSERT(skb != NULL);
42 ds->ds_vdata = skb->data;
43
44 /* setup rx descriptors */
be0418ad
S
45 ath9k_hw_setuprxdesc(ah, ds,
46 skb_tailroom(skb), /* buffer size */
f078f209
LR
47 0);
48
49 if (sc->sc_rxlink == NULL)
50 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
51 else
52 *sc->sc_rxlink = bf->bf_daddr;
53
54 sc->sc_rxlink = &ds->ds_link;
55 ath9k_hw_rxena(ah);
56}
57
be0418ad 58static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, u32 len)
f078f209
LR
59{
60 struct sk_buff *skb;
61 u32 off;
62
63 /*
64 * Cache-line-align. This is important (for the
65 * 5210 at least) as not doing so causes bogus data
66 * in rx'd frames.
67 */
68
69 skb = dev_alloc_skb(len + sc->sc_cachelsz - 1);
70 if (skb != NULL) {
71 off = ((unsigned long) skb->data) % sc->sc_cachelsz;
72 if (off != 0)
73 skb_reserve(skb, sc->sc_cachelsz - off);
74 } else {
75 DPRINTF(sc, ATH_DBG_FATAL,
76 "%s: skbuff alloc of size %u failed\n",
77 __func__, len);
78 return NULL;
79 }
80
81 return skb;
82}
83
be0418ad
S
84static int ath_rate2idx(struct ath_softc *sc, int rate)
85{
86 int i = 0, cur_band, n_rates;
87 struct ieee80211_hw *hw = sc->hw;
88
89 cur_band = hw->conf.channel->band;
90 n_rates = sc->sbands[cur_band].n_bitrates;
91
92 for (i = 0; i < n_rates; i++) {
93 if (sc->sbands[cur_band].bitrates[i].bitrate == rate)
94 break;
95 }
96
97 /*
98 * NB:mac80211 validates rx rate index against the supported legacy rate
99 * index only (should be done against ht rates also), return the highest
100 * legacy rate index for rx rate which does not match any one of the
101 * supported basic and extended rates to make mac80211 happy.
102 * The following hack will be cleaned up once the issue with
103 * the rx rate index validation in mac80211 is fixed.
104 */
105 if (i == n_rates)
106 return n_rates - 1;
107
108 return i;
f078f209
LR
109}
110
/*
 * For Decrypt or Demic errors, we only mark packet status here and always push
 * up the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or real decryption error. This let us keep statistics there.
 *
 * Fills in *rx_status from the descriptor's RX status words.
 * Returns 1 if the frame should be delivered to mac80211, 0 to drop it.
 * Sets *decrypt_error when the hardware flagged a decryption failure.
 */
static int ath_rx_prepare(struct sk_buff *skb, struct ath_desc *ds,
			  struct ieee80211_rx_status *rx_status, bool *decrypt_error,
			  struct ath_softc *sc)
{
	struct ath_rate_table *rate_table = sc->hw_rate_table[sc->sc_curmode];
	struct ieee80211_hdr *hdr;
	int ratekbps, rix;
	u8 ratecode;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));

	if (ds->ds_rxstat.rs_more) {
		/*
		 * Frame spans multiple descriptors; this cannot happen yet
		 * as we don't support jumbograms. If not in monitor mode,
		 * discard the frame. Enable this if you want to see
		 * error frames in Monitor mode.
		 */
		if (sc->sc_ah->ah_opmode != ATH9K_M_MONITOR)
			goto rx_next;
	} else if (ds->ds_rxstat.rs_status != 0) {
		/* CRC errors are reported to mac80211 but not dropped here. */
		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
			rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
		/* PHY errors are always dropped. */
		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY)
			goto rx_next;

		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
		} else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
			if (ieee80211_is_ctl(fc))
				/*
				 * Sometimes, we get invalid
				 * MIC failures on valid control frames.
				 * Remove these mic errors.
				 */
				ds->ds_rxstat.rs_status &= ~ATH9K_RXERR_MIC;
			else
				rx_status->flag |= RX_FLAG_MMIC_ERROR;
		}
		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR) {
			if (ds->ds_rxstat.rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				goto rx_next;
		} else {
			if (ds->ds_rxstat.rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				goto rx_next;
			}
		}
	}

	/* Translate the hardware rate code to kbps via the rate table. */
	ratecode = ds->ds_rxstat.rs_rate;
	rix = rate_table->rateCodeToIndex[ratecode];
	ratekbps = rate_table->info[rix].ratekbps;

	/* HT rate */
	if (ratecode & 0x80) {
		/* NOTE(review): 27/13 presumably scales 20MHz -> 40MHz and
		 * 10/9 accounts for short GI — confirm against the rate
		 * tables before relying on these factors. */
		if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040)
			ratekbps = (ratekbps * 27) / 13;
		if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
			ratekbps = (ratekbps * 10) / 9;
	}

	rx_status->mactime = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
	rx_status->band = sc->hw->conf.channel->band;
	rx_status->freq = sc->hw->conf.channel->center_freq;
	rx_status->noise = sc->sc_ani.sc_noise_floor;
	/* rs_rssi is relative to the noise floor. */
	rx_status->signal = rx_status->noise + ds->ds_rxstat.rs_rssi;
	/* Convert kbps to the 100-kbps units ath_rate2idx() expects. */
	rx_status->rate_idx = ath_rate2idx(sc, (ratekbps / 100));
	rx_status->antenna = ds->ds_rxstat.rs_antenna;

	/* at 45 you will be able to use MCS 15 reliably. A more elaborate
	 * scheme can be used here but it requires tables of SNR/throughput for
	 * each possible mode used. */
	rx_status->qual = ds->ds_rxstat.rs_rssi * 100 / 45;

	/* rssi can be more than 45 though, anything above that
	 * should be considered at 100% */
	if (rx_status->qual > 100)
		rx_status->qual = 100;

	rx_status->flag |= RX_FLAG_TSFT;

	return 1;
rx_next:
	return 0;
}
212
213static void ath_opmode_init(struct ath_softc *sc)
214{
215 struct ath_hal *ah = sc->sc_ah;
216 u32 rfilt, mfilt[2];
217
218 /* configure rx filter */
219 rfilt = ath_calcrxfilter(sc);
220 ath9k_hw_setrxfilter(ah, rfilt);
221
222 /* configure bssid mask */
60b67f51 223 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
f078f209
LR
224 ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
225
226 /* configure operational mode */
227 ath9k_hw_setopmode(ah);
228
229 /* Handle any link-level address change. */
230 ath9k_hw_setmac(ah, sc->sc_myaddr);
231
232 /* calculate and install multicast filter */
233 mfilt[0] = mfilt[1] = ~0;
234
235 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
236 DPRINTF(sc, ATH_DBG_CONFIG ,
237 "%s: RX filter 0x%x, MC filter %08x:%08x\n",
238 __func__, rfilt, mfilt[0], mfilt[1]);
239}
240
241int ath_rx_init(struct ath_softc *sc, int nbufs)
242{
243 struct sk_buff *skb;
244 struct ath_buf *bf;
245 int error = 0;
246
247 do {
248 spin_lock_init(&sc->sc_rxflushlock);
98deeea0 249 sc->sc_flags &= ~SC_OP_RXFLUSH;
f078f209
LR
250 spin_lock_init(&sc->sc_rxbuflock);
251
f078f209
LR
252 sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN,
253 min(sc->sc_cachelsz,
254 (u16)64));
255
256 DPRINTF(sc, ATH_DBG_CONFIG, "%s: cachelsz %u rxbufsize %u\n",
257 __func__, sc->sc_cachelsz, sc->sc_rxbufsize);
258
259 /* Initialize rx descriptors */
260
261 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
262 "rx", nbufs, 1);
263 if (error != 0) {
264 DPRINTF(sc, ATH_DBG_FATAL,
265 "%s: failed to allocate rx descriptors: %d\n",
266 __func__, error);
267 break;
268 }
269
f078f209
LR
270 list_for_each_entry(bf, &sc->sc_rxbuf, list) {
271 skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
272 if (skb == NULL) {
273 error = -ENOMEM;
274 break;
275 }
276
277 bf->bf_mpdu = skb;
927e70e9
S
278 bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
279 skb_end_pointer(skb) - skb->head,
280 PCI_DMA_FROMDEVICE);
281 bf->bf_dmacontext = bf->bf_buf_addr;
f078f209
LR
282 }
283 sc->sc_rxlink = NULL;
284
285 } while (0);
286
287 if (error)
288 ath_rx_cleanup(sc);
289
290 return error;
291}
292
f078f209
LR
293void ath_rx_cleanup(struct ath_softc *sc)
294{
295 struct sk_buff *skb;
296 struct ath_buf *bf;
297
298 list_for_each_entry(bf, &sc->sc_rxbuf, list) {
299 skb = bf->bf_mpdu;
300 if (skb)
301 dev_kfree_skb(skb);
302 }
303
f078f209
LR
304 if (sc->sc_rxdma.dd_desc_len != 0)
305 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
306}
307
308/*
309 * Calculate the receive filter according to the
310 * operating mode and state:
311 *
312 * o always accept unicast, broadcast, and multicast traffic
313 * o maintain current state of phy error reception (the hal
314 * may enable phy error frames for noise immunity work)
315 * o probe request frames are accepted only when operating in
316 * hostap, adhoc, or monitor modes
317 * o enable promiscuous mode according to the interface state
318 * o accept beacons:
319 * - when operating in adhoc mode so the 802.11 layer creates
320 * node table entries for peers,
321 * - when operating in station mode for collecting rssi data when
322 * the station is otherwise quiet, or
323 * - when operating as a repeater so we see repeater-sta beacons
324 * - when scanning
325 */
326
327u32 ath_calcrxfilter(struct ath_softc *sc)
328{
329#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
7dcfdcd9 330
f078f209
LR
331 u32 rfilt;
332
333 rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
334 | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
335 | ATH9K_RX_FILTER_MCAST;
336
337 /* If not a STA, enable processing of Probe Requests */
b4696c8b 338 if (sc->sc_ah->ah_opmode != ATH9K_M_STA)
f078f209
LR
339 rfilt |= ATH9K_RX_FILTER_PROBEREQ;
340
341 /* Can't set HOSTAP into promiscous mode */
b4696c8b 342 if (((sc->sc_ah->ah_opmode != ATH9K_M_HOSTAP) &&
7dcfdcd9 343 (sc->rx_filter & FIF_PROMISC_IN_BSS)) ||
b4696c8b 344 (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR)) {
f078f209
LR
345 rfilt |= ATH9K_RX_FILTER_PROM;
346 /* ??? To prevent from sending ACK */
347 rfilt &= ~ATH9K_RX_FILTER_UCAST;
348 }
349
ffb82676 350 if (sc->sc_ah->ah_opmode == ATH9K_M_STA ||
be0418ad 351 sc->sc_ah->ah_opmode == ATH9K_M_IBSS)
f078f209
LR
352 rfilt |= ATH9K_RX_FILTER_BEACON;
353
354 /* If in HOSTAP mode, want to enable reception of PSPOLL frames
355 & beacon frames */
b4696c8b 356 if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP)
f078f209 357 rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL);
be0418ad 358
f078f209 359 return rfilt;
7dcfdcd9 360
f078f209
LR
361#undef RX_FILTER_PRESERVE
362}
363
f078f209
LR
364int ath_startrecv(struct ath_softc *sc)
365{
366 struct ath_hal *ah = sc->sc_ah;
367 struct ath_buf *bf, *tbf;
368
369 spin_lock_bh(&sc->sc_rxbuflock);
370 if (list_empty(&sc->sc_rxbuf))
371 goto start_recv;
372
373 sc->sc_rxlink = NULL;
374 list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) {
f078f209
LR
375 ath_rx_buf_link(sc, bf);
376 }
377
378 /* We could have deleted elements so the list may be empty now */
379 if (list_empty(&sc->sc_rxbuf))
380 goto start_recv;
381
382 bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
383 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
be0418ad 384 ath9k_hw_rxena(ah);
f078f209
LR
385
386start_recv:
387 spin_unlock_bh(&sc->sc_rxbuflock);
be0418ad
S
388 ath_opmode_init(sc);
389 ath9k_hw_startpcureceive(ah);
390
f078f209
LR
391 return 0;
392}
393
f078f209
LR
394bool ath_stoprecv(struct ath_softc *sc)
395{
396 struct ath_hal *ah = sc->sc_ah;
f078f209
LR
397 bool stopped;
398
be0418ad
S
399 ath9k_hw_stoppcurecv(ah);
400 ath9k_hw_setrxfilter(ah, 0);
401 stopped = ath9k_hw_stopdmarecv(ah);
402 mdelay(3); /* 3ms is long enough for 1 frame */
403 sc->sc_rxlink = NULL;
404
f078f209
LR
405 return stopped;
406}
407
f078f209
LR
408void ath_flushrecv(struct ath_softc *sc)
409{
f078f209 410 spin_lock_bh(&sc->sc_rxflushlock);
98deeea0 411 sc->sc_flags |= SC_OP_RXFLUSH;
f078f209 412 ath_rx_tasklet(sc, 1);
98deeea0 413 sc->sc_flags &= ~SC_OP_RXFLUSH;
f078f209
LR
414 spin_unlock_bh(&sc->sc_rxflushlock);
415}
416
f078f209
LR
/*
 * Process completed RX descriptors: for each finished buffer, validate
 * and prepare the frame, hand it to mac80211, then attach a freshly
 * allocated skb to the buffer and requeue it for the hardware.
 * With flush != 0 the frames are discarded (requeued unprocessed).
 * Always returns 0.
 */
int ath_rx_tasklet(struct ath_softc *sc, int flush)
{
#define PA2DESC(_sc, _pa)                                               \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc +		\
			     ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))

	struct ath_buf *bf;
	struct ath_desc *ds;
	struct sk_buff *skb = NULL, *requeue_skb;
	struct ieee80211_rx_status rx_status;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padsize, retval;
	bool decrypt_error = false;
	u8 keyix;

	spin_lock_bh(&sc->sc_rxbuflock);

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		if (list_empty(&sc->sc_rxbuf)) {
			sc->sc_rxlink = NULL;
			break;
		}

		bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
		ds = bf->bf_desc;

		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on. All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		retval = ath9k_hw_rxprocdesc(ah, ds,
					     bf->bf_daddr,
					     PA2DESC(sc, ds->ds_link),
					     0);
		if (retval == -EINPROGRESS) {
			struct ath_buf *tbf;
			struct ath_desc *tds;

			/* No follow-on descriptor to cross-check: stop. */
			if (list_is_last(&bf->list, &sc->sc_rxbuf)) {
				sc->sc_rxlink = NULL;
				break;
			}

			tbf = list_entry(bf->list.next, struct ath_buf, list);

			/*
			 * On some hardware the descriptor status words could
			 * get corrupted, including the done bit. Because of
			 * this, check if the next descriptor's done bit is
			 * set or not.
			 *
			 * If the next descriptor's done bit is set, the current
			 * descriptor has been corrupted. Force s/w to discard
			 * this descriptor and continue...
			 */

			tds = tbf->bf_desc;
			retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr,
						     PA2DESC(sc, tds->ds_link), 0);
			if (retval == -EINPROGRESS) {
				break;
			}
		}

		skb = bf->bf_mpdu;
		if (!skb)	/* buffer lost its skb somehow: skip it */
			continue;

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue;

		/* Empty frame: nothing to deliver. */
		if (!ds->ds_rxstat.rs_datalen)
			goto requeue;

		/* The status portion of the descriptor could get corrupted. */
		if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)
			goto requeue;

		if (!ath_rx_prepare(skb, ds, &rx_status, &decrypt_error, sc))
			goto requeue;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->sc_rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue;

		/* Sync and unmap the frame */
		/* NOTE(review): the sync length (skb_tailroom) and the unmap
		 * length (sc_rxbufsize) can differ from the length the buffer
		 * was originally mapped with — confirm against the DMA API
		 * requirement that unmap size match map size. */
		pci_dma_sync_single_for_cpu(sc->pdev, bf->bf_buf_addr,
					    skb_tailroom(skb),
					    PCI_DMA_FROMDEVICE);
		pci_unmap_single(sc->pdev, bf->bf_buf_addr,
				 sc->sc_rxbufsize,
				 PCI_DMA_FROMDEVICE);

		skb_put(skb, ds->ds_rxstat.rs_datalen);
		skb->protocol = cpu_to_be16(ETH_P_CONTROL);

		/* see if any padding is done by the hw and remove it */
		hdr = (struct ieee80211_hdr *)skb->data;
		hdrlen = ieee80211_get_hdrlen_from_skb(skb);

		if (hdrlen & 3) {
			/* Shift the header down over the pad bytes. */
			padsize = hdrlen % 4;
			memmove(skb->data + padsize, skb->data, hdrlen);
			skb_pull(skb, padsize);
		}

		keyix = ds->ds_rxstat.rs_keyix;

		if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) {
			/* Hardware matched a key: frame was decrypted. */
			rx_status.flag |= RX_FLAG_DECRYPTED;
		} else if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED)
			   && !decrypt_error && skb->len >= hdrlen + 4) {
			/* Protected frame without an h/w key match: read the
			 * key index from the IV and check the s/w keymap. */
			keyix = skb->data[hdrlen + 3] >> 6;

			if (test_bit(keyix, sc->sc_keymap))
				rx_status.flag |= RX_FLAG_DECRYPTED;
		}

		/* Send the frame to mac80211 */
		__ieee80211_rx(sc->hw, skb, &rx_status);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = pci_map_single(sc->pdev, requeue_skb->data,
						 sc->sc_rxbufsize,
						 PCI_DMA_FROMDEVICE);
		bf->bf_dmacontext = bf->bf_buf_addr;

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
			if (++sc->sc_rxotherant >= 3)
				ath_setdefantenna(sc, ds->ds_rxstat.rs_antenna);
		} else {
			sc->sc_rxotherant = 0;
		}
requeue:
		/* Put the buffer back at the tail and re-link it for h/w. */
		list_move_tail(&bf->list, &sc->sc_rxbuf);
		ath_rx_buf_link(sc, bf);
	} while (1);

	spin_unlock_bh(&sc->sc_rxbuflock);

	return 0;
#undef PA2DESC
}