/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <net/mac80211.h>

#include "iwl-eeprom.h"
#include "iwl-dev.h" /* FIXME: remove */
#include "iwl-debug.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-power.h"
#include "iwl-sta.h"
#include "iwl-helpers.h"


MODULE_DESCRIPTION("iwl core");
MODULE_VERSION(IWLWIFI_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");

/*
 * With bt_coex_active set to true, uCode will do kill/defer
 * every time the priority line is asserted (BT is sending signals on the
 * priority line in the PCIx).
 * With bt_coex_active set to false, uCode will ignore the BT activity and
 * perform normal operation.
 *
 * Users might experience transmit issues on some platforms due to this
 * WiFi/BT co-existence problem. The possible behaviors are:
 *   Able to scan and find all the available APs
 *   Not able to associate with any AP
 * On those platforms, WiFi communication can be restored by setting
 * the "bt_coex_active" module parameter to "false".
 *
 * default: bt_coex_active = true (BT_COEX_ENABLE)
 */
static bool bt_coex_active = true;
module_param(bt_coex_active, bool, S_IRUGO);
MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist\n");

static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
	{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
	 0, COEX_UNASSOC_IDLE_FLAGS},
	{COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
	 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
	{COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
	 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
	{COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
	 0, COEX_CALIBRATION_FLAGS},
	{COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
	 0, COEX_PERIODIC_CALIBRATION_FLAGS},
	{COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
	 0, COEX_CONNECTION_ESTAB_FLAGS},
	{COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
	 0, COEX_ASSOCIATED_IDLE_FLAGS},
	{COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
	 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
	{COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
	 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
	{COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
	 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
	{COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
	{COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
	{COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
	 0, COEX_STAND_ALONE_DEBUG_FLAGS},
	{COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
	 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
	{COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
	{COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
};

#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
				    IWL_RATE_SISO_##s##M_PLCP, \
				    IWL_RATE_MIMO2_##s##M_PLCP,\
				    IWL_RATE_MIMO3_##s##M_PLCP,\
				    IWL_RATE_##r##M_IEEE,      \
				    IWL_RATE_##ip##M_INDEX,    \
				    IWL_RATE_##in##M_INDEX,    \
				    IWL_RATE_##rp##M_INDEX,    \
				    IWL_RATE_##rn##M_INDEX,    \
				    IWL_RATE_##pp##M_INDEX,    \
				    IWL_RATE_##np##M_INDEX }

u32 iwl_debug_level;
EXPORT_SYMBOL(iwl_debug_level);

/*
 * Parameter order:
 *   rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
 *
 * If there isn't a valid next or previous rate then INV is used which
 * maps to IWL_RATE_INVALID
 *
 */
const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
	IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),    /*  1mbps */
	IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),          /*  2mbps */
	IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
	IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),      /* 11mbps */
	IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),          /*  6mbps */
	IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),         /*  9mbps */
	IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),    /* 12mbps */
	IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),    /* 18mbps */
	IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),    /* 24mbps */
	IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),    /* 36mbps */
	IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),    /* 48mbps */
	IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV), /* 54mbps */
	IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV), /* 60mbps */
	/* FIXME:RS:          ^^    should be INV (legacy) */
};
EXPORT_SYMBOL(iwl_rates);

/**
 * translate ucode response to mac80211 tx status control values
 */
void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
			      struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->control.rates[0];

	info->antenna_sel_tx =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	if (rate_n_flags & RATE_MCS_HT_MSK)
		r->flags |= IEEE80211_TX_RC_MCS;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
	if (rate_n_flags & RATE_MCS_DUP_MSK)
		r->flags |= IEEE80211_TX_RC_DUP_DATA;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	r->idx = iwl_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}
EXPORT_SYMBOL(iwl_hwrate_to_tx_control);

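/*
 * Map a uCode rate_n_flags value onto an index into the driver's
 * iwl_rates table; returns -1 if the rate cannot be matched.
 */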
int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
{
	int idx = 0;

	/* HT rate format */
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		idx = (rate_n_flags & 0xff);

		if (idx >= IWL_RATE_MIMO3_6M_PLCP)
			idx = idx - IWL_RATE_MIMO3_6M_PLCP;
		else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
			idx = idx - IWL_RATE_MIMO2_6M_PLCP;

		idx += IWL_FIRST_OFDM_RATE;
		/* skip 9M not supported in ht*/
		if (idx >= IWL_RATE_9M_INDEX)
			idx += 1;
		if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
			return idx;

	/* legacy rate format, search for match in table */
	} else {
		for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
			if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
				return idx;
	}

	return -1;
}
EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx);

int iwl_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
{
	int idx = 0;
	int band_offset = 0;

	/* HT rate format: mac80211 wants an MCS number, which is just LSB */
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		idx = (rate_n_flags & 0xff);
		return idx;
	/* Legacy rate format, search for match in table */
	} else {
		if (band == IEEE80211_BAND_5GHZ)
			band_offset = IWL_FIRST_OFDM_RATE;
		for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
			if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
				return idx - band_offset;
	}

	return -1;
}

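/*
 * Return the next valid TX antenna after @ant, wrapping around the
 * RATE_ANT_NUM antennas; if no other antenna is valid, keep @ant.
 */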
u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant)
{
	int i;
	u8 ind = ant;
	for (i = 0; i < RATE_ANT_NUM - 1; i++) {
		ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
		if (priv->hw_params.valid_tx_ant & BIT(ind))
			return ind;
	}
	return ant;
}
EXPORT_SYMBOL(iwl_toggle_tx_ant);

const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
EXPORT_SYMBOL(iwl_bcast_addr);


/* This function both allocates and initializes hw and priv. */
struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
				   struct ieee80211_ops *hw_ops)
{
	struct iwl_priv *priv;

	/* mac80211 allocates memory for this device instance, including
	 * space for this driver's private structure */
	struct ieee80211_hw *hw =
		ieee80211_alloc_hw(sizeof(struct iwl_priv), hw_ops);
	if (hw == NULL) {
		printk(KERN_ERR "%s: Can not allocate network device\n",
		       cfg->name);
		goto out;
	}

	priv = hw->priv;
	priv->hw = hw;

out:
	return hw;
}
EXPORT_SYMBOL(iwl_alloc_all);

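/*
 * Cache the hardware revision registers and the PCI revision ID in the
 * driver's private data.
 */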
void iwl_hw_detect(struct iwl_priv *priv)
{
	priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
	priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
	pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
}
EXPORT_SYMBOL(iwl_hw_detect);

int iwl_hw_nic_init(struct iwl_priv *priv)
{
	unsigned long flags;
	struct iwl_rx_queue *rxq = &priv->rxq;
	int ret;

	/* nic_init */
	spin_lock_irqsave(&priv->lock, flags);
	priv->cfg->ops->lib->apm_ops.init(priv);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&priv->lock, flags);

	ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);

	priv->cfg->ops->lib->apm_ops.config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		ret = iwl_rx_queue_alloc(priv);
		if (ret) {
			IWL_ERR(priv, "Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		iwl_rx_queue_reset(priv, rxq);

	iwl_rx_replenish(priv);

	iwl_rx_init(priv, rxq);

	spin_lock_irqsave(&priv->lock, flags);

	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(priv, rxq);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Allocate and init all Tx and Command queues */
	ret = iwl_txq_ctx_reset(priv);
	if (ret)
		return ret;

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}
EXPORT_SYMBOL(iwl_hw_nic_init);

/*
 * QoS support
 */
void iwl_activate_qos(struct iwl_priv *priv, u8 force)
{
	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	priv->qos_data.def_qos_parm.qos_flags = 0;

	if (priv->qos_data.qos_cap.q_AP.queue_request &&
	    !priv->qos_data.qos_cap.q_AP.txop_request)
		priv->qos_data.def_qos_parm.qos_flags |=
			QOS_PARAM_FLG_TXOP_TYPE_MSK;
	if (priv->qos_data.qos_active)
		priv->qos_data.def_qos_parm.qos_flags |=
			QOS_PARAM_FLG_UPDATE_EDCA_MSK;

	if (priv->current_ht_config.is_ht)
		priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;

	if (force || iwl_is_associated(priv)) {
		IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
			      priv->qos_data.qos_active,
			      priv->qos_data.def_qos_parm.qos_flags);

		iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM,
				       sizeof(struct iwl_qosparam_cmd),
				       &priv->qos_data.def_qos_parm, NULL);
	}
}
EXPORT_SYMBOL(iwl_activate_qos);

/*
 * AC        CWmin     CW max     AIFSN     TXOP Limit    TXOP Limit
 *                                          (802.11b)     (802.11a/g)
 * AC_BK      15          1023       7         0              0
 * AC_BE      15          1023       3         0              0
 * AC_VI       7            15       2         6.016ms        3.008ms
 * AC_VO       3             7       2         3.264ms        1.504ms
 */
void iwl_reset_qos(struct iwl_priv *priv)
{
	u16 cw_min = 15;
	u16 cw_max = 1023;
	u8 aifs = 2;
	bool is_legacy = false;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&priv->lock, flags);
	/* QoS always active in AP and ADHOC mode
	 * In STA mode wait for association
	 */
	if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
	    priv->iw_mode == NL80211_IFTYPE_AP)
		priv->qos_data.qos_active = 1;
	else
		priv->qos_data.qos_active = 0;

	/* check for legacy mode */
	if ((priv->iw_mode == NL80211_IFTYPE_ADHOC &&
	    (priv->active_rate & IWL_OFDM_RATES_MASK) == 0) ||
	    (priv->iw_mode == NL80211_IFTYPE_STATION &&
	    (priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK) == 0)) {
		cw_min = 31;
		is_legacy = 1;
	}

	if (priv->qos_data.qos_active)
		aifs = 3;

	/* AC_BE */
	priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
	priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
	priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
	priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
	priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;

	if (priv->qos_data.qos_active) {
		/* AC_BK */
		i = 1;
		priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
		priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
		priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
		priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
		priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;

		/* AC_VI */
		i = 2;
		priv->qos_data.def_qos_parm.ac[i].cw_min =
			cpu_to_le16((cw_min + 1) / 2 - 1);
		priv->qos_data.def_qos_parm.ac[i].cw_max =
			cpu_to_le16(cw_min);
		priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
		if (is_legacy)
			priv->qos_data.def_qos_parm.ac[i].edca_txop =
				cpu_to_le16(6016);
		else
			priv->qos_data.def_qos_parm.ac[i].edca_txop =
				cpu_to_le16(3008);
		priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;

		/* AC_VO */
		i = 3;
		priv->qos_data.def_qos_parm.ac[i].cw_min =
			cpu_to_le16((cw_min + 1) / 4 - 1);
		priv->qos_data.def_qos_parm.ac[i].cw_max =
			cpu_to_le16((cw_min + 1) / 2 - 1);
		priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
		priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
		if (is_legacy)
			priv->qos_data.def_qos_parm.ac[i].edca_txop =
				cpu_to_le16(3264);
		else
			priv->qos_data.def_qos_parm.ac[i].edca_txop =
				cpu_to_le16(1504);
	} else {
		for (i = 1; i < 4; i++) {
			priv->qos_data.def_qos_parm.ac[i].cw_min =
				cpu_to_le16(cw_min);
			priv->qos_data.def_qos_parm.ac[i].cw_max =
				cpu_to_le16(cw_max);
			priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
			priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
			priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
		}
	}
	IWL_DEBUG_QOS(priv, "set QoS to default \n");

	spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL(iwl_reset_qos);

#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
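/*
 * Fill mac80211's ieee80211_sta_ht_cap from the device's hw_params:
 * per-chain MCS rx masks, 20/40 MHz and short-GI support, the highest
 * supported Rx data rate and the Tx MCS parameters.
 */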
static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
			      struct ieee80211_sta_ht_cap *ht_info,
			      enum ieee80211_band band)
{
	u16 max_bit_rate = 0;
	u8 rx_chains_num = priv->hw_params.rx_chains_num;
	u8 tx_chains_num = priv->hw_params.tx_chains_num;

	ht_info->cap = 0;
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));

	ht_info->ht_supported = true;

	if (priv->cfg->ht_greenfield_support)
		ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
	max_bit_rate = MAX_BIT_RATE_20_MHZ;
	if (priv->hw_params.ht40_channel & BIT(band)) {
		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
		ht_info->mcs.rx_mask[4] = 0x01;
		max_bit_rate = MAX_BIT_RATE_40_MHZ;
	}

	if (priv->cfg->mod_params->amsdu_size_8K)
		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;

	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;

	ht_info->mcs.rx_mask[0] = 0xFF;
	if (rx_chains_num >= 2)
		ht_info->mcs.rx_mask[1] = 0xFF;
	if (rx_chains_num >= 3)
		ht_info->mcs.rx_mask[2] = 0xFF;

	/* Highest supported Rx data rate */
	max_bit_rate *= rx_chains_num;
	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);

	/* Tx MCS capabilities */
	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	if (tx_chains_num != rx_chains_num) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}
}

/**
 * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom
 */
int iwlcore_init_geos(struct iwl_priv *priv)
{
	struct iwl_channel_info *ch;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *channels;
	struct ieee80211_channel *geo_ch;
	struct ieee80211_rate *rates;
	int i = 0;

	if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
	    priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
		IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
		set_bit(STATUS_GEO_CONFIGURED, &priv->status);
		return 0;
	}

	channels = kzalloc(sizeof(struct ieee80211_channel) *
			   priv->channel_count, GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
			GFP_KERNEL);
	if (!rates) {
		kfree(channels);
		return -ENOMEM;
	}

	/* 5.2GHz channels start after the 2.4GHz channels */
	sband = &priv->bands[IEEE80211_BAND_5GHZ];
	sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
	/* just OFDM */
	sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;

	if (priv->cfg->sku & IWL_SKU_N)
		iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
					 IEEE80211_BAND_5GHZ);

	sband = &priv->bands[IEEE80211_BAND_2GHZ];
	sband->channels = channels;
	/* OFDM & CCK */
	sband->bitrates = rates;
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY;

	if (priv->cfg->sku & IWL_SKU_N)
		iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
					 IEEE80211_BAND_2GHZ);

	priv->ieee_channels = channels;
	priv->ieee_rates = rates;

	for (i = 0; i < priv->channel_count; i++) {
		ch = &priv->channel_info[i];

		/* FIXME: might be removed if scan is OK */
		if (!is_channel_valid(ch))
			continue;

		if (is_channel_a_band(ch))
			sband = &priv->bands[IEEE80211_BAND_5GHZ];
		else
			sband = &priv->bands[IEEE80211_BAND_2GHZ];

		geo_ch = &sband->channels[sband->n_channels++];

		geo_ch->center_freq =
			ieee80211_channel_to_frequency(ch->channel);
		geo_ch->max_power = ch->max_power_avg;
		geo_ch->max_antenna_gain = 0xff;
		geo_ch->hw_value = ch->channel;

		if (is_channel_valid(ch)) {
			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
				geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;

			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
				geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;

			if (ch->flags & EEPROM_CHANNEL_RADAR)
				geo_ch->flags |= IEEE80211_CHAN_RADAR;

			geo_ch->flags |= ch->ht40_extension_channel;

			if (ch->max_power_avg > priv->tx_power_device_lmt)
				priv->tx_power_device_lmt = ch->max_power_avg;
		} else {
			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
		}

		IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
			       ch->channel, geo_ch->center_freq,
			       is_channel_a_band(ch) ?  "5.2" : "2.4",
			       geo_ch->flags & IEEE80211_CHAN_DISABLED ?
			       "restricted" : "valid",
			       geo_ch->flags);
	}

	if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
	     priv->cfg->sku & IWL_SKU_A) {
		IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
			"Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
			priv->pci_dev->device,
			priv->pci_dev->subsystem_device);
		priv->cfg->sku &= ~IWL_SKU_A;
	}

	IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
		 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
		 priv->bands[IEEE80211_BAND_5GHZ].n_channels);

	set_bit(STATUS_GEO_CONFIGURED, &priv->status);

	return 0;
}
EXPORT_SYMBOL(iwlcore_init_geos);

/*
 * iwlcore_free_geos - undo allocations in iwlcore_init_geos
 */
void iwlcore_free_geos(struct iwl_priv *priv)
{
	kfree(priv->ieee_channels);
	kfree(priv->ieee_rates);
	clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
}
EXPORT_SYMBOL(iwlcore_free_geos);

/*
 * iwlcore_rts_tx_cmd_flag: Set rts/cts. 3945 and 4965 only share this
 * function.
 */
void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
			     __le32 *tx_flags)
{
	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		*tx_flags |= TX_CMD_FLG_RTS_MSK;
		*tx_flags &= ~TX_CMD_FLG_CTS_MSK;
	} else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
		*tx_flags |= TX_CMD_FLG_CTS_MSK;
	}
}
EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag);

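/*
 * A single Rx chain is sufficient when static SMPS is in use or when the
 * current HT configuration reports that one chain is enough.
 */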
static bool is_single_rx_stream(struct iwl_priv *priv)
{
	return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
	       priv->current_ht_config.single_chain_sufficient;
}

static u8 iwl_is_channel_extension(struct iwl_priv *priv,
				   enum ieee80211_band band,
				   u16 channel, u8 extension_chan_offset)
{
	const struct iwl_channel_info *ch_info;

	ch_info = iwl_get_channel_info(priv, band, channel);
	if (!is_channel_valid(ch_info))
		return 0;

	if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
		return !(ch_info->ht40_extension_channel &
					IEEE80211_CHAN_NO_HT40PLUS);
	else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
		return !(ch_info->ht40_extension_channel &
					IEEE80211_CHAN_NO_HT40MINUS);

	return 0;
}

u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
			  struct ieee80211_sta_ht_cap *sta_ht_inf)
{
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;

	if (!ht_conf->is_ht || !ht_conf->is_40mhz)
		return 0;

	/* We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
	 * the bit will not set if it is pure 40MHz case
	 */
	if (sta_ht_inf) {
		if (!sta_ht_inf->ht_supported)
			return 0;
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	if (priv->disable_ht40)
		return 0;
#endif
	return iwl_is_channel_extension(priv, priv->band,
			le16_to_cpu(priv->staging_rxon.channel),
			ht_conf->extension_chan_offset);
}
EXPORT_SYMBOL(iwl_is_ht40_tx_allowed);

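/*
 * Scale a beacon interval down by an integer factor so that it does not
 * exceed max_beacon_val; fall back to max_beacon_val if the result is 0.
 */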
static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
{
	u16 new_val = 0;
	u16 beacon_factor = 0;

	beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
	new_val = beacon_val / beacon_factor;

	if (!new_val)
		new_val = max_beacon_val;

	return new_val;
}

void iwl_setup_rxon_timing(struct iwl_priv *priv)
{
	u64 tsf;
	s32 interval_tm, rem;
	unsigned long flags;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;

	conf = ieee80211_get_hw_conf(priv->hw);

	spin_lock_irqsave(&priv->lock, flags);
	priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp);
	priv->rxon_timing.listen_interval = cpu_to_le16(conf->listen_interval);

	if (priv->iw_mode == NL80211_IFTYPE_STATION) {
		beacon_int = priv->beacon_int;
		priv->rxon_timing.atim_window = 0;
	} else {
		beacon_int = priv->vif->bss_conf.beacon_int;

		/* TODO: we need to get atim_window from upper stack
		 * for now we set to 0 */
		priv->rxon_timing.atim_window = 0;
	}

	beacon_int = iwl_adjust_beacon_interval(beacon_int,
				priv->hw_params.max_beacon_itrvl * 1024);
	priv->rxon_timing.beacon_interval = cpu_to_le16(beacon_int);

	tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
	interval_tm = beacon_int * 1024;
	rem = do_div(tsf, interval_tm);
	priv->rxon_timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	spin_unlock_irqrestore(&priv->lock, flags);
	IWL_DEBUG_ASSOC(priv,
			"beacon interval %d beacon timer %d beacon tim %d\n",
			le16_to_cpu(priv->rxon_timing.beacon_interval),
			le32_to_cpu(priv->rxon_timing.beacon_init_val),
			le16_to_cpu(priv->rxon_timing.atim_window));
}
EXPORT_SYMBOL(iwl_setup_rxon_timing);

void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
{
	struct iwl_rxon_cmd *rxon = &priv->staging_rxon;

	if (hw_decrypt)
		rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
	else
		rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;

}
EXPORT_SYMBOL(iwl_set_rxon_hwcrypto);

/**
 * iwl_check_rxon_cmd - validate RXON structure is valid
 *
 * NOTE:  This is really only useful during development and can eventually
 * be #ifdef'd out once the driver is stable and folks aren't actively
 * making changes
 */
int iwl_check_rxon_cmd(struct iwl_priv *priv)
{
	int error = 0;
	int counter = 1;
	struct iwl_rxon_cmd *rxon = &priv->staging_rxon;

	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
		error |= le32_to_cpu(rxon->flags &
				(RXON_FLG_TGJ_NARROW_BAND_MSK |
				 RXON_FLG_RADAR_DETECT_MSK));
		if (error)
			IWL_WARN(priv, "check 24G fields %d | %d\n",
				 counter++, error);
	} else {
		error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
				0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
		if (error)
			IWL_WARN(priv, "check 52 fields %d | %d\n",
				 counter++, error);
		error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
		if (error)
			IWL_WARN(priv, "check 52 CCK %d | %d\n",
				 counter++, error);
	}
	error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
	if (error)
		IWL_WARN(priv, "check mac addr %d | %d\n", counter++, error);

	/* make sure basic rates 6Mbps and 1Mbps are supported */
	error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
		  ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
	if (error)
		IWL_WARN(priv, "check basic rate %d | %d\n", counter++, error);

	error |= (le16_to_cpu(rxon->assoc_id) > 2007);
	if (error)
		IWL_WARN(priv, "check assoc id %d | %d\n", counter++, error);

	error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
	if (error)
		IWL_WARN(priv, "check CCK and short slot %d | %d\n",
			 counter++, error);

	error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
	if (error)
		IWL_WARN(priv, "check CCK & auto detect %d | %d\n",
			 counter++, error);

	error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
			RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
	if (error)
		IWL_WARN(priv, "check TGG and auto detect %d | %d\n",
			 counter++, error);

	if (error)
		IWL_WARN(priv, "Tuning to channel %d\n",
			 le16_to_cpu(rxon->channel));

	if (error) {
		IWL_ERR(priv, "Not a valid iwl_rxon_assoc_cmd field values\n");
		return -1;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_check_rxon_cmd);

/**
 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
 * @priv: staging_rxon is compared to active_rxon
 *
 * If the RXON structure is changing enough to require a new tune,
 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
 */
int iwl_full_rxon_required(struct iwl_priv *priv)
{

	/* These items are only settable from the full RXON command */
	if (!(iwl_is_associated(priv)) ||
	    compare_ether_addr(priv->staging_rxon.bssid_addr,
			       priv->active_rxon.bssid_addr) ||
	    compare_ether_addr(priv->staging_rxon.node_addr,
			       priv->active_rxon.node_addr) ||
	    compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
			       priv->active_rxon.wlap_bssid_addr) ||
	    (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
	    (priv->staging_rxon.channel != priv->active_rxon.channel) ||
	    (priv->staging_rxon.air_propagation !=
	     priv->active_rxon.air_propagation) ||
	    (priv->staging_rxon.ofdm_ht_single_stream_basic_rates !=
	     priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
	    (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
	     priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
	    (priv->staging_rxon.ofdm_ht_triple_stream_basic_rates !=
	     priv->active_rxon.ofdm_ht_triple_stream_basic_rates) ||
	    (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
		return 1;

	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
	 * be updated with the RXON_ASSOC command -- however only some
	 * flag transitions are allowed using RXON_ASSOC */

	/* Check if we are not switching bands */
	if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
	    (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
		return 1;

	/* Check if we are switching association toggle */
	if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
	    (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
		return 1;

	return 0;
}
EXPORT_SYMBOL(iwl_full_rxon_required);

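/*
 * Return the lowest PLCP rate for the current band: 1 Mbps on 2.4 GHz,
 * 6 Mbps otherwise.
 */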
u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv)
{
	/*
	 * Assign the lowest rate -- should really get this from
	 * the beacon skb from mac80211.
	 */
	if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
		return IWL_RATE_1M_PLCP;
	else
		return IWL_RATE_6M_PLCP;
}
EXPORT_SYMBOL(iwl_rate_get_lowest_plcp);

void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
{
	struct iwl_rxon_cmd *rxon = &priv->staging_rxon;

	if (!ht_conf->is_ht) {
		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
			RXON_FLG_HT40_PROT_MSK |
			RXON_FLG_HT_PROT_MSK);
		return;
	}

	/* FIXME: if the definition of ht_protection changed, the "translation"
	 * will be needed for rxon->flags
	 */
	rxon->flags |= cpu_to_le32(ht_conf->ht_protection << RXON_FLG_HT_OPERATING_MODE_POS);

	/* Set up channel bandwidth:
	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
	/* clear the HT channel mode before set the mode */
	rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (iwl_is_ht40_tx_allowed(priv, NULL)) {
		/* pure ht40 */
		if (ht_conf->ht_protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
			/* Note: control channel is opposite of extension channel */
			switch (ht_conf->extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			/* Note: control channel is opposite of extension channel */
			switch (ht_conf->extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				/* channel location only valid if in Mixed mode */
				IWL_ERR(priv, "invalid extension channel offset\n");
				break;
			}
		}
	} else {
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv);

	IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
			"extension channel offset 0x%x\n",
			le32_to_cpu(rxon->flags), ht_conf->ht_protection,
			ht_conf->extension_chan_offset);
	return;
}
EXPORT_SYMBOL(iwl_set_rxon_ht);

#define IWL_NUM_RX_CHAINS_MULTIPLE	3
#define IWL_NUM_RX_CHAINS_SINGLE	2
#define IWL_NUM_IDLE_CHAINS_DUAL	2
#define IWL_NUM_IDLE_CHAINS_SINGLE	1

/*
 * Determine how many receiver/antenna chains to use.
 *
 * More provides better reception via diversity.  Fewer saves power
 * at the expense of throughput, but only when not in powersave to
 * start with.
 *
 * MIMO (dual stream) requires at least 2, but works better with 3.
 * This does not determine *which* chains to use, just how many.
 */
static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
{
	/* # of Rx chains to use when expecting MIMO. */
	if (is_single_rx_stream(priv))
		return IWL_NUM_RX_CHAINS_SINGLE;
	else
		return IWL_NUM_RX_CHAINS_MULTIPLE;
}

/*
 * When we are in power saving mode, unless the device supports spatial
 * multiplexing power save, use the active count for the rx chain count.
 */
static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
{
	/* # Rx chains when idling, depending on SMPS mode */
	switch (priv->current_ht_config.smps) {
	case IEEE80211_SMPS_STATIC:
	case IEEE80211_SMPS_DYNAMIC:
		return IWL_NUM_IDLE_CHAINS_SINGLE;
	case IEEE80211_SMPS_OFF:
		return active_cnt;
	default:
		WARN(1, "invalid SMPS mode %d",
		     priv->current_ht_config.smps);
		return active_cnt;
	}
}

/* up to 4 chains */
static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
{
	u8 res;
	res = (chain_bitmap & BIT(0)) >> 0;
	res += (chain_bitmap & BIT(1)) >> 1;
	res += (chain_bitmap & BIT(2)) >> 2;
	res += (chain_bitmap & BIT(3)) >> 3;
	return res;
}

/**
 * iwl_is_monitor_mode - Determine if interface in monitor mode
 *
 * priv->iw_mode is set in add_interface, but add_interface is
 * never called for monitor mode. The only way mac80211 informs us about
 * monitor mode is through configuring filters (call to configure_filter).
 */
bool iwl_is_monitor_mode(struct iwl_priv *priv)
{
	return !!(priv->staging_rxon.filter_flags & RXON_FILTER_PROMISC_MSK);
}
EXPORT_SYMBOL(iwl_is_monitor_mode);

/**
 * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
 *
 * Selects how many and which Rx receivers/antennas/chains to use.
 * This should not be used for scan command ... it puts data in wrong place.
 */
void iwl_set_rxon_chain(struct iwl_priv *priv)
{
	bool is_single = is_single_rx_stream(priv);
	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Tell uCode which antennas are actually connected.
	 * Before first association, we assume all antennas are connected.
	 * Just after first association, iwl_chain_noise_calibration()
	 *    checks which antennas actually *are* connected. */
	if (priv->chain_noise_data.active_chains)
		active_chains = priv->chain_noise_data.active_chains;
	else
		active_chains = priv->hw_params.valid_rx_ant;

	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many receivers should we use? */
	active_rx_cnt = iwl_get_active_rx_chain_count(priv);
	idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);


	/* correct rx chain count according hw settings
	 * and chain noise calibration
	 */
	valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;

	/* copied from 'iwl_bg_request_scan()' */
	/* Force use of chains B and C (0x6) for Rx for 4965
	 * Avoid A (0x1) because of its off-channel reception on A-band.
	 * MIMO is not used here, but value is required */
	if (iwl_is_monitor_mode(priv) &&
	    !(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) &&
	    ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)) {
		rx_chain = ANT_ABC << RXON_RX_CHAIN_VALID_POS;
		rx_chain |= ANT_BC << RXON_RX_CHAIN_FORCE_SEL_POS;
		rx_chain |= ANT_ABC << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
		rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
	}

	priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain);

	if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
		priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
			priv->staging_rxon.rx_chain,
			active_rx_cnt, idle_rx_cnt);

	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}
EXPORT_SYMBOL(iwl_set_rxon_chain);

/**
 * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON
 * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz
 * @channel: Any channel valid for the requested phymode
 *
 * In addition to setting the staging RXON, priv->phymode is also set.
 *
 * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
 * in the staging RXON flag structure based on the phymode
 */
int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
{
	enum ieee80211_band band = ch->band;
	u16 channel = ieee80211_frequency_to_channel(ch->center_freq);

	if (!iwl_get_channel_info(priv, band, channel)) {
		IWL_DEBUG_INFO(priv, "Could not set channel to %d [%d]\n",
			       channel, band);
		return -EINVAL;
	}

	if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
	    (priv->band == band))
		return 0;

	priv->staging_rxon.channel = cpu_to_le16(channel);
	if (band == IEEE80211_BAND_5GHZ)
		priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
	else
		priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;

	priv->band = band;

	IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);

	return 0;
}
EXPORT_SYMBOL(iwl_set_rxon_channel);

void iwl_set_flags_for_band(struct iwl_priv *priv,
			    enum ieee80211_band band)
{
	if (band == IEEE80211_BAND_5GHZ) {
		priv->staging_rxon.flags &=
		    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
		      | RXON_FLG_CCK_MSK);
		priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
	} else {
		/* Copied from iwl_post_associate() */
		if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
			priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;

		if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;

		priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
		priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
		priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
	}
}

/*
 * initialize rxon structure with default values from eeprom
 */
void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode)
{
	const struct iwl_channel_info *ch_info;

	memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));

	switch (mode) {
	case NL80211_IFTYPE_AP:
		priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
		break;

	case NL80211_IFTYPE_STATION:
		priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
		priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	case NL80211_IFTYPE_ADHOC:
		priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
		priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
		priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
						  RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	default:
		IWL_ERR(priv, "Unsupported interface type %d\n", mode);
		break;
	}

#if 0
	/* TODO:  Figure out when short_preamble would be set and cache from
	 * that */
	if (!hw_to_local(priv->hw)->short_preamble)
		priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

	ch_info = iwl_get_channel_info(priv, priv->band,
				       le16_to_cpu(priv->active_rxon.channel));

	if (!ch_info)
		ch_info = &priv->channel_info[0];

	priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
	priv->band = ch_info->band;

	iwl_set_flags_for_band(priv, priv->band);

	priv->staging_rxon.ofdm_basic_rates =
	    (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
	priv->staging_rxon.cck_basic_rates =
	    (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;

	/* clear both MIX and PURE40 mode flag */
	priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
					RXON_FLG_CHANNEL_MODE_PURE_40);
	memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
	memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN);
	priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
	priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
	priv->staging_rxon.ofdm_ht_triple_stream_basic_rates = 0xff;
}
EXPORT_SYMBOL(iwl_connection_init_rx_config);

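/*
 * Rebuild priv->active_rate from the current band's mac80211 rate table
 * and reset the CCK/OFDM basic rates in the staging RXON to the driver's
 * basic-rate masks.
 */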
static void iwl_set_rate(struct iwl_priv *priv)
{
	const struct ieee80211_supported_band *hw = NULL;
	struct ieee80211_rate *rate;
	int i;

	hw = iwl_get_hw_mode(priv, priv->band);
	if (!hw) {
		IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
		return;
	}

	priv->active_rate = 0;

	for (i = 0; i < hw->n_bitrates; i++) {
		rate = &(hw->bitrates[i]);
		if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
			priv->active_rate |= (1 << rate->hw_value);
	}

	IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);

	priv->staging_rxon.cck_basic_rates =
		(IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;

	priv->staging_rxon.ofdm_basic_rates =
		(IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
}

void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
	struct iwl_csa_notification *csa = &(pkt->u.csa_notif);

	if (priv->switch_rxon.switch_in_progress) {
		if (!le32_to_cpu(csa->status) &&
		    (csa->channel == priv->switch_rxon.channel)) {
			rxon->channel = csa->channel;
			priv->staging_rxon.channel = csa->channel;
			IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
				      le16_to_cpu(csa->channel));
		} else
			IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
				le16_to_cpu(csa->channel));

		priv->switch_rxon.switch_in_progress = false;
	}
}
EXPORT_SYMBOL(iwl_rx_csa);

#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_print_rx_config_cmd(struct iwl_priv *priv)
{
	struct iwl_rxon_cmd *rxon = &priv->staging_rxon;

	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
			le32_to_cpu(rxon->filter_flags));
	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
			rxon->ofdm_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
}
EXPORT_SYMBOL(iwl_print_rx_config_cmd);
#endif
/**
 * iwl_irq_handle_error - called for HW or SW error interrupt from card
 */
void iwl_irq_handle_error(struct iwl_priv *priv)
{
	/* Set the FW error flag -- cleared on iwl_down */
	set_bit(STATUS_FW_ERROR, &priv->status);

	/* Cancel currently queued command. */
	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);

	priv->cfg->ops->lib->dump_nic_error_log(priv);
	if (priv->cfg->ops->lib->dump_csr)
		priv->cfg->ops->lib->dump_csr(priv);
	if (priv->cfg->ops->lib->dump_fh)
		priv->cfg->ops->lib->dump_fh(priv, NULL, false);
	priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
		iwl_print_rx_config_cmd(priv);
#endif

	wake_up_interruptible(&priv->wait_command_queue);

	/* Keep the restart process from trying to send host
	 * commands by clearing the INIT status bit */
	clear_bit(STATUS_READY, &priv->status);

	if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
			  "Restarting adapter due to uCode error.\n");

		if (priv->cfg->mod_params->restart_fw)
			queue_work(priv->workqueue, &priv->restart);
	}
}
EXPORT_SYMBOL(iwl_irq_handle_error);

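/*
 * Ask the device to stop bus-master DMA and poll until the master-disabled
 * bit is reported, warning if that does not happen within 100 usec.
 */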
int iwl_apm_stop_master(struct iwl_priv *priv)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret)
		IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(priv, "stop master\n");

	return ret;
}
EXPORT_SYMBOL(iwl_apm_stop_master);

void iwl_apm_stop(struct iwl_priv *priv)
{
	IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");

	/* Stop device's DMA activity */
	iwl_apm_stop_master(priv);

	/* Reset the entire device */
	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
EXPORT_SYMBOL(iwl_apm_stop);


/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
int iwl_apm_init(struct iwl_priv *priv)
{
	int ret = 0;
	u16 lctl;

	IWL_DEBUG_INFO(priv, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *   don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 * NOTE:  This is no-op for 3945 (non-existant bit)
	 */
	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	/*
	 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	if (priv->cfg->set_l0s) {
		lctl = iwl_pcie_link_ctl(priv);
		if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
					PCI_CFG_LINK_CTRL_VAL_L1_EN) {
			/* L1-ASPM enabled; disable(!) L0S */
			iwl_set_bit(priv, CSR_GIO_REG,
					CSR_GIO_REG_VAL_L0S_ENABLED);
			IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
		} else {
			/* L1-ASPM disabled; enable(!) L0S */
			iwl_clear_bit(priv, CSR_GIO_REG,
					CSR_GIO_REG_VAL_L0S_ENABLED);
			IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
		}
	}

	/* Configure analog phase-lock-loop before activating to D0A */
	if (priv->cfg->pll_cfg_val)
		iwl_set_bit(priv, CSR_ANA_PLL_CFG, priv->cfg->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
	 * BSM (Boostrap State Machine) is only in 3945 and 4965;
	 * later devices (i.e. 5000 and later) have non-volatile SRAM,
	 * and don't need BSM to restore data after power-saving sleep.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	if (priv->cfg->use_bsm)
		iwl_write_prph(priv, APMG_CLK_EN_REG,
			APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
	else
		iwl_write_prph(priv, APMG_CLK_EN_REG,
			APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

out:
	return ret;
}
EXPORT_SYMBOL(iwl_apm_init);


void iwl_configure_filter(struct ieee80211_hw *hw,
			  unsigned int changed_flags,
			  unsigned int *total_flags,
			  u64 multicast)
{
	struct iwl_priv *priv = hw->priv;
	__le32 *filter_flags = &priv->staging_rxon.filter_flags;

	IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
			changed_flags, *total_flags);

	if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
		if (*total_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))
			*filter_flags |= RXON_FILTER_PROMISC_MSK;
		else
			*filter_flags &= ~RXON_FILTER_PROMISC_MSK;
	}
	if (changed_flags & FIF_ALLMULTI) {
		if (*total_flags & FIF_ALLMULTI)
			*filter_flags |= RXON_FILTER_ACCEPT_GRP_MSK;
		else
			*filter_flags &= ~RXON_FILTER_ACCEPT_GRP_MSK;
	}
	if (changed_flags & FIF_CONTROL) {
		if (*total_flags & FIF_CONTROL)
			*filter_flags |= RXON_FILTER_CTL2HOST_MSK;
		else
			*filter_flags &= ~RXON_FILTER_CTL2HOST_MSK;
	}
	if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
		if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
			*filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
		else
			*filter_flags &= ~RXON_FILTER_BCON_AWARE_MSK;
	}

	/* We avoid iwl_commit_rxon here to commit the new filter flags
	 * since mac80211 will call ieee80211_hw_config immediately.
	 * (mc_list is not supported at this time). Otherwise, we need to
	 * queue a background iwl_commit_rxon work.
	 */

	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
EXPORT_SYMBOL(iwl_configure_filter);

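/*
 * Set the core hw_params (Rx queue sizes, Rx page order, beacon interval
 * limit), honour the amsdu_size_8K and disable_11n module parameters, then
 * call the device-specific set_hw_params() hook.
 */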
da154e30
RR
1560int iwl_set_hw_params(struct iwl_priv *priv)
1561{
da154e30
RR
1562 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
1563 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
1564 if (priv->cfg->mod_params->amsdu_size_8K)
2f301227 1565 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
da154e30 1566 else
2f301227 1567 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
da154e30 1568
2c2f3b33
TW
1569 priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
1570
49779293
RR
1571 if (priv->cfg->mod_params->disable_11n)
1572 priv->cfg->sku &= ~IWL_SKU_N;
1573
da154e30
RR
1574 /* Device-specific setup */
1575 return priv->cfg->ops->lib->set_hw_params(priv);
1576}
1577EXPORT_SYMBOL(iwl_set_hw_params);
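/*
 * Worked example for the rx_page_order computation above, assuming the
 * common PAGE_SIZE of 4 KiB: get_order(IWL_RX_BUF_SIZE_4K) is 0 (a
 * single page per RX buffer), while get_order(IWL_RX_BUF_SIZE_8K) is 1
 * (an order-1, two-page allocation when 8K A-MSDU buffers are enabled
 * via the amsdu_size_8K module parameter).
 */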
6ba87956 1578
630fe9b6
TW
1579int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1580{
1581 int ret = 0;
5eadd94b
WYG
1582 s8 prev_tx_power = priv->tx_power_user_lmt;
1583
630fe9b6 1584 if (tx_power < IWL_TX_POWER_TARGET_POWER_MIN) {
daf518de
WF
1585 IWL_WARN(priv, "Requested user TXPOWER %d below lower limit %d.\n",
1586 tx_power,
1587 IWL_TX_POWER_TARGET_POWER_MIN);
630fe9b6
TW
1588 return -EINVAL;
1589 }
1590
dc1b0973 1591 if (tx_power > priv->tx_power_device_lmt) {
08f2d58d
WYG
1592 IWL_WARN(priv,
1593 "Requested user TXPOWER %d above upper limit %d.\n",
dc1b0973 1594 tx_power, priv->tx_power_device_lmt);
630fe9b6
TW
1595 return -EINVAL;
1596 }
1597
1598 if (priv->tx_power_user_lmt != tx_power)
1599 force = true;
1600
019fb97d 1601 /* if the NIC is not up, don't send the command */
5eadd94b
WYG
1602 if (iwl_is_ready_rf(priv)) {
1603 priv->tx_power_user_lmt = tx_power;
1604 if (force && priv->cfg->ops->lib->send_tx_power)
1605 ret = priv->cfg->ops->lib->send_tx_power(priv);
1606 else if (!priv->cfg->ops->lib->send_tx_power)
1607 ret = -EOPNOTSUPP;
1608 /*
1609 * if fail to set tx_power, restore the orig. tx power
1610 */
1611 if (ret)
1612 priv->tx_power_user_lmt = prev_tx_power;
1613 }
630fe9b6 1614
5eadd94b
WYG
1615 /*
 1616 * Even though this is an async host command, the command
 1617 * will always report success from the uCode.
 1618 * So once the driver has placed the command into the queue
 1619 * successfully, it can use priv->tx_power_user_lmt
 1620 * to reflect the current tx power.
1621 */
630fe9b6
TW
1622 return ret;
1623}
1624EXPORT_SYMBOL(iwl_set_tx_power);
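/*
 * Usage note: the mac80211 config path below (iwl_mac_config) calls
 * iwl_set_tx_power(priv, conf->power_level, false) whenever
 * IEEE80211_CONF_CHANGE_POWER is set, so the user-requested limit is
 * re-validated against tx_power_device_lmt on every power change.
 */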
1625
ef850d7c 1626irqreturn_t iwl_isr_legacy(int irq, void *data)
f17d08a6
AK
1627{
1628 struct iwl_priv *priv = data;
1629 u32 inta, inta_mask;
1630 u32 inta_fh;
1631 if (!priv)
1632 return IRQ_NONE;
1633
1634 spin_lock(&priv->lock);
1635
1636 /* Disable (but don't clear!) interrupts here to avoid
1637 * back-to-back ISRs and sporadic interrupts from our NIC.
1638 * If we have something to service, the tasklet will re-enable ints.
1639 * If we *don't* have something, we'll re-enable before leaving here. */
1640 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
1641 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
1642
1643 /* Discover which interrupts are active/pending */
1644 inta = iwl_read32(priv, CSR_INT);
1645 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1646
1647 /* Ignore interrupt if there's nothing in NIC to service.
1648 * This may be due to IRQ shared with another device,
1649 * or due to sporadic interrupts thrown from our NIC. */
1650 if (!inta && !inta_fh) {
1651 IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0, inta_fh == 0\n");
1652 goto none;
1653 }
1654
1655 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1656 /* Hardware disappeared. It might have already raised
1657 * an interrupt */
1658 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1659 goto unplugged;
1660 }
1661
1662 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
1663 inta, inta_mask, inta_fh);
1664
1665 inta &= ~CSR_INT_BIT_SCD;
1666
1667 /* iwl_irq_tasklet() will service interrupts and re-enable them */
1668 if (likely(inta || inta_fh))
1669 tasklet_schedule(&priv->irq_tasklet);
1670
1671 unplugged:
1672 spin_unlock(&priv->lock);
1673 return IRQ_HANDLED;
1674
1675 none:
1676 /* re-enable interrupts here since we don't have anything to service. */
 1677 /* only re-enable if disabled by irq */
1678 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1679 iwl_enable_interrupts(priv);
1680 spin_unlock(&priv->lock);
1681 return IRQ_NONE;
1682}
ef850d7c 1683EXPORT_SYMBOL(iwl_isr_legacy);
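/*
 * A minimal registration sketch (the actual hookup lives in the device
 * probe code, not in this file); the ISR above is written to tolerate
 * a shared line, so it would typically be requested as something like:
 *
 *	err = request_irq(priv->pci_dev->irq, iwl_isr_legacy,
 *			  IRQF_SHARED, DRV_NAME, priv);
 *	if (err)
 *		goto err_out;
 *
 * The DRV_NAME argument and the error label are assumptions used only
 * for illustration here.
 */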
f17d08a6 1684
17f841cd
SO
1685int iwl_send_bt_config(struct iwl_priv *priv)
1686{
1687 struct iwl_bt_cmd bt_cmd = {
456d0f76
WYG
1688 .lead_time = BT_LEAD_TIME_DEF,
1689 .max_kill = BT_MAX_KILL_DEF,
17f841cd
SO
1690 .kill_ack_mask = 0,
1691 .kill_cts_mask = 0,
1692 };
1693
06702a73
WYG
1694 if (!bt_coex_active)
1695 bt_cmd.flags = BT_COEX_DISABLE;
1696 else
1697 bt_cmd.flags = BT_COEX_ENABLE;
1698
1699 IWL_DEBUG_INFO(priv, "BT coex %s\n",
1700 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
1701
17f841cd
SO
1702 return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1703 sizeof(struct iwl_bt_cmd), &bt_cmd);
1704}
1705EXPORT_SYMBOL(iwl_send_bt_config);
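/*
 * Note: bt_coex_active is the module parameter checked above; loading
 * the driver with it cleared (e.g. something like
 * "modprobe iwlcore bt_coex_active=0" -- the module name is assumed
 * here) makes this command send BT_COEX_DISABLE so the uCode ignores
 * BT activity.
 */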
1706
ef8d5529 1707int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
49ea8596 1708{
ef8d5529
WYG
1709 struct iwl_statistics_cmd statistics_cmd = {
1710 .configuration_flags =
1711 clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
49ea8596 1712 };
ef8d5529
WYG
1713
1714 if (flags & CMD_ASYNC)
1715 return iwl_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
1716 sizeof(struct iwl_statistics_cmd),
1717 &statistics_cmd, NULL);
1718 else
1719 return iwl_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
1720 sizeof(struct iwl_statistics_cmd),
1721 &statistics_cmd);
49ea8596
EG
1722}
1723EXPORT_SYMBOL(iwl_send_statistics_request);
7e8c519e 1724
b0692f2f
EG
1725/**
 1726 * iwlcore_verify_inst_sparse - verify runtime uCode image in card vs. host,
1727 * using sample data 100 bytes apart. If these sample points are good,
1728 * it's a pretty good bet that everything between them is good, too.
1729 */
1730static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
1731{
1732 u32 val;
1733 int ret = 0;
1734 u32 errcnt = 0;
1735 u32 i;
1736
e1623446 1737 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
b0692f2f 1738
b0692f2f
EG
1739 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
1740 /* read data comes through single port, auto-incr addr */
1741 /* NOTE: Use the debugless read so we don't flood kernel log
1742 * if IWL_DL_IO is set */
1743 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
250bdd21 1744 i + IWL49_RTC_INST_LOWER_BOUND);
b0692f2f
EG
1745 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1746 if (val != le32_to_cpu(*image)) {
1747 ret = -EIO;
1748 errcnt++;
1749 if (errcnt >= 3)
1750 break;
1751 }
1752 }
1753
b0692f2f
EG
1754 return ret;
1755}
1756
1757/**
 1758 * iwl_verify_inst_full - verify runtime uCode image in card vs. host,
1759 * looking at all data.
1760 */
1761static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
1762 u32 len)
1763{
1764 u32 val;
1765 u32 save_len = len;
1766 int ret = 0;
1767 u32 errcnt;
1768
e1623446 1769 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
b0692f2f 1770
250bdd21
SO
1771 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
1772 IWL49_RTC_INST_LOWER_BOUND);
b0692f2f
EG
1773
1774 errcnt = 0;
1775 for (; len > 0; len -= sizeof(u32), image++) {
1776 /* read data comes through single port, auto-incr addr */
1777 /* NOTE: Use the debugless read so we don't flood kernel log
1778 * if IWL_DL_IO is set */
1779 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1780 if (val != le32_to_cpu(*image)) {
15b1687c 1781 IWL_ERR(priv, "uCode INST section is invalid at "
b0692f2f
EG
1782 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1783 save_len - len, val, le32_to_cpu(*image));
1784 ret = -EIO;
1785 errcnt++;
1786 if (errcnt >= 20)
1787 break;
1788 }
1789 }
1790
b0692f2f 1791 if (!errcnt)
e1623446
TW
1792 IWL_DEBUG_INFO(priv,
1793 "ucode image in INSTRUCTION memory is good\n");
b0692f2f
EG
1794
1795 return ret;
1796}
1797
1798/**
1799 * iwl_verify_ucode - determine which instruction image is in SRAM,
1800 * and verify its contents
1801 */
1802int iwl_verify_ucode(struct iwl_priv *priv)
1803{
1804 __le32 *image;
1805 u32 len;
1806 int ret;
1807
1808 /* Try bootstrap */
1809 image = (__le32 *)priv->ucode_boot.v_addr;
1810 len = priv->ucode_boot.len;
1811 ret = iwlcore_verify_inst_sparse(priv, image, len);
1812 if (!ret) {
e1623446 1813 IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
b0692f2f
EG
1814 return 0;
1815 }
1816
1817 /* Try initialize */
1818 image = (__le32 *)priv->ucode_init.v_addr;
1819 len = priv->ucode_init.len;
1820 ret = iwlcore_verify_inst_sparse(priv, image, len);
1821 if (!ret) {
e1623446 1822 IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
b0692f2f
EG
1823 return 0;
1824 }
1825
1826 /* Try runtime/protocol */
1827 image = (__le32 *)priv->ucode_code.v_addr;
1828 len = priv->ucode_code.len;
1829 ret = iwlcore_verify_inst_sparse(priv, image, len);
1830 if (!ret) {
e1623446 1831 IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
b0692f2f
EG
1832 return 0;
1833 }
1834
15b1687c 1835 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
b0692f2f
EG
1836
1837 /* Since nothing seems to match, show first several data entries in
1838 * instruction SRAM, so maybe visual inspection will give a clue.
1839 * Selection of bootstrap image (vs. other images) is arbitrary. */
1840 image = (__le32 *)priv->ucode_boot.v_addr;
1841 len = priv->ucode_boot.len;
1842 ret = iwl_verify_inst_full(priv, image, len);
1843
1844 return ret;
1845}
1846EXPORT_SYMBOL(iwl_verify_ucode);
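/*
 * Rough cost comparison of the two verifiers used above: the sparse
 * check reads one 32-bit word every 100 bytes, so for a ~40 KiB
 * instruction image (an illustrative size, not a fixed constant) that
 * is on the order of 400 register reads, while iwl_verify_inst_full
 * reads every word (~10,000 reads for the same image). That is why the
 * full dump is only attempted after all images fail the sparse check.
 */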
1847
56e12615 1848
47f4a587
EG
1849void iwl_rf_kill_ct_config(struct iwl_priv *priv)
1850{
1851 struct iwl_ct_kill_config cmd;
672639de 1852 struct iwl_ct_kill_throttling_config adv_cmd;
47f4a587
EG
1853 unsigned long flags;
1854 int ret = 0;
1855
1856 spin_lock_irqsave(&priv->lock, flags);
1857 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
1858 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1859 spin_unlock_irqrestore(&priv->lock, flags);
3ad3b92a 1860 priv->thermal_throttle.ct_kill_toggle = false;
47f4a587 1861
480e8407 1862 if (priv->cfg->support_ct_kill_exit) {
672639de
WYG
1863 adv_cmd.critical_temperature_enter =
1864 cpu_to_le32(priv->hw_params.ct_kill_threshold);
1865 adv_cmd.critical_temperature_exit =
1866 cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);
1867
1868 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1869 sizeof(adv_cmd), &adv_cmd);
d91b1ba3
WYG
1870 if (ret)
1871 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1872 else
1873 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
1874 "succeeded, "
1875 "critical temperature enter is %d,"
1876 "exit is %d\n",
1877 priv->hw_params.ct_kill_threshold,
1878 priv->hw_params.ct_kill_exit_threshold);
480e8407 1879 } else {
672639de
WYG
1880 cmd.critical_temperature_R =
1881 cpu_to_le32(priv->hw_params.ct_kill_threshold);
189a2b59 1882
672639de
WYG
1883 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1884 sizeof(cmd), &cmd);
d91b1ba3
WYG
1885 if (ret)
1886 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1887 else
1888 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
1889 "succeeded, "
1890 "critical temperature is %d\n",
1891 priv->hw_params.ct_kill_threshold);
672639de 1892 }
47f4a587
EG
1893}
1894EXPORT_SYMBOL(iwl_rf_kill_ct_config);
14a08a7f 1895
0ad91a35 1896
14a08a7f
EG
1897/*
1898 * CARD_STATE_CMD
1899 *
1900 * Use: Sets the device's internal card state to enable, disable, or halt
1901 *
1902 * When in the 'enable' state the card operates as normal.
1903 * When in the 'disable' state, the card enters into a low power mode.
1904 * When in the 'halt' state, the card is shut down and must be fully
1905 * restarted to come back on.
1906 */
c496294e 1907int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
14a08a7f
EG
1908{
1909 struct iwl_host_cmd cmd = {
1910 .id = REPLY_CARD_STATE_CMD,
1911 .len = sizeof(u32),
1912 .data = &flags,
c2acea8e 1913 .flags = meta_flag,
14a08a7f
EG
1914 };
1915
1916 return iwl_send_cmd(priv, &cmd);
1917}
1918
030f05ed
AK
1919void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
1920 struct iwl_rx_mem_buffer *rxb)
1921{
1922#ifdef CONFIG_IWLWIFI_DEBUG
2f301227 1923 struct iwl_rx_packet *pkt = rxb_addr(rxb);
030f05ed
AK
1924 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
1925 IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
1926 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
1927#endif
1928}
1929EXPORT_SYMBOL(iwl_rx_pm_sleep_notif);
1930
1931void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
1932 struct iwl_rx_mem_buffer *rxb)
1933{
2f301227 1934 struct iwl_rx_packet *pkt = rxb_addr(rxb);
396887a2 1935 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
030f05ed 1936 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
396887a2
DH
1937 "notification for %s:\n", len,
1938 get_cmd_string(pkt->hdr.cmd));
1939 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
030f05ed
AK
1940}
1941EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif);
261b9c33
AK
1942
1943void iwl_rx_reply_error(struct iwl_priv *priv,
1944 struct iwl_rx_mem_buffer *rxb)
1945{
2f301227 1946 struct iwl_rx_packet *pkt = rxb_addr(rxb);
261b9c33
AK
1947
1948 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
1949 "seq 0x%04X ser 0x%08X\n",
1950 le32_to_cpu(pkt->u.err_resp.error_type),
1951 get_cmd_string(pkt->u.err_resp.cmd_id),
1952 pkt->u.err_resp.cmd_id,
1953 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
1954 le32_to_cpu(pkt->u.err_resp.error_info));
1955}
1956EXPORT_SYMBOL(iwl_rx_reply_error);
1957
a83b9141
WYG
1958void iwl_clear_isr_stats(struct iwl_priv *priv)
1959{
1960 memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
1961}
a83b9141 1962
488829f1
AK
1963int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
1964 const struct ieee80211_tx_queue_params *params)
1965{
1966 struct iwl_priv *priv = hw->priv;
1967 unsigned long flags;
1968 int q;
1969
1970 IWL_DEBUG_MAC80211(priv, "enter\n");
1971
1972 if (!iwl_is_ready_rf(priv)) {
1973 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
1974 return -EIO;
1975 }
1976
1977 if (queue >= AC_NUM) {
1978 IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
1979 return 0;
1980 }
1981
1982 q = AC_NUM - 1 - queue;
1983
1984 spin_lock_irqsave(&priv->lock, flags);
1985
1986 priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
1987 priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
1988 priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
1989 priv->qos_data.def_qos_parm.ac[q].edca_txop =
1990 cpu_to_le16((params->txop * 32));
1991
1992 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
1993 priv->qos_data.qos_active = 1;
1994
1995 if (priv->iw_mode == NL80211_IFTYPE_AP)
1996 iwl_activate_qos(priv, 1);
1997 else if (priv->assoc_id && iwl_is_associated(priv))
1998 iwl_activate_qos(priv, 0);
1999
2000 spin_unlock_irqrestore(&priv->lock, flags);
2001
2002 IWL_DEBUG_MAC80211(priv, "leave\n");
2003 return 0;
2004}
2005EXPORT_SYMBOL(iwl_mac_conf_tx);
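/*
 * Worked example for the edca_txop conversion above: mac80211 hands
 * the TXOP limit in units of 32 microseconds, so a params->txop of 94
 * (a typical WMM value for the video AC) is stored as 94 * 32 = 3008.
 * The "q = AC_NUM - 1 - queue" step reverses the mac80211 queue index
 * before indexing the def_qos_parm.ac[] array.
 */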
5bbe233b
AK
2006
2007static void iwl_ht_conf(struct iwl_priv *priv,
02bb1bea 2008 struct ieee80211_bss_conf *bss_conf)
5bbe233b 2009{
fad95bf5 2010 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
5bbe233b
AK
2011 struct ieee80211_sta *sta;
2012
 2013 IWL_DEBUG_MAC80211(priv, "enter\n");
2014
fad95bf5 2015 if (!ht_conf->is_ht)
5bbe233b
AK
2016 return;
2017
fad95bf5 2018 ht_conf->ht_protection =
9ed6bcce 2019 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
fad95bf5 2020 ht_conf->non_GF_STA_present =
9ed6bcce 2021 !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
5bbe233b 2022
02bb1bea
JB
2023 ht_conf->single_chain_sufficient = false;
2024
2025 switch (priv->iw_mode) {
2026 case NL80211_IFTYPE_STATION:
2027 rcu_read_lock();
5ed176e1 2028 sta = ieee80211_find_sta(priv->vif, priv->bssid);
02bb1bea
JB
2029 if (sta) {
2030 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2031 int maxstreams;
2032
2033 maxstreams = (ht_cap->mcs.tx_params &
2034 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
2035 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
2036 maxstreams += 1;
2037
2038 if ((ht_cap->mcs.rx_mask[1] == 0) &&
2039 (ht_cap->mcs.rx_mask[2] == 0))
2040 ht_conf->single_chain_sufficient = true;
2041 if (maxstreams <= 1)
2042 ht_conf->single_chain_sufficient = true;
2043 } else {
2044 /*
2045 * If at all, this can only happen through a race
2046 * when the AP disconnects us while we're still
 2047 * setting up the connection; in that case mac80211
 2048 * will soon tell us about it.
2049 */
2050 ht_conf->single_chain_sufficient = true;
2051 }
2052 rcu_read_unlock();
2053 break;
2054 case NL80211_IFTYPE_ADHOC:
2055 ht_conf->single_chain_sufficient = true;
2056 break;
2057 default:
2058 break;
2059 }
5bbe233b
AK
2060
2061 IWL_DEBUG_MAC80211(priv, "leave\n");
2062}
2063
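/*
 * Worked example for the maxstreams computation above: if the peer's
 * HT MCS tx_params advertise two spatial streams, the MAX_STREAMS
 * field holds 1, so maxstreams becomes 1 + 1 = 2; with maxstreams = 2,
 * single_chain_sufficient is only set if rx_mask[1] and rx_mask[2] are
 * both zero, i.e. the peer can only receive MCS 0-7.
 */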
c91c3efc
AK
2064static inline void iwl_set_no_assoc(struct iwl_priv *priv)
2065{
2066 priv->assoc_id = 0;
2067 iwl_led_disassociate(priv);
2068 /*
2069 * inform the ucode that there is no longer an
2070 * association and that no more packets should be
2071 * sent
2072 */
2073 priv->staging_rxon.filter_flags &=
2074 ~RXON_FILTER_ASSOC_MSK;
2075 priv->staging_rxon.assoc_id = 0;
2076 iwlcore_commit_rxon(priv);
2077}
2078
5bbe233b
AK
2079#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
2080void iwl_bss_info_changed(struct ieee80211_hw *hw,
2d0ddec5
JB
2081 struct ieee80211_vif *vif,
2082 struct ieee80211_bss_conf *bss_conf,
2083 u32 changes)
5bbe233b
AK
2084{
2085 struct iwl_priv *priv = hw->priv;
3a650292 2086 int ret;
5bbe233b
AK
2087
2088 IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
2089
2d0ddec5
JB
2090 if (!iwl_is_alive(priv))
2091 return;
2092
2093 mutex_lock(&priv->mutex);
2094
2095 if (changes & BSS_CHANGED_BEACON &&
2096 priv->iw_mode == NL80211_IFTYPE_AP) {
2097 dev_kfree_skb(priv->ibss_beacon);
2098 priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
2099 }
2100
d7129e19
JB
2101 if (changes & BSS_CHANGED_BEACON_INT) {
2102 priv->beacon_int = bss_conf->beacon_int;
2103 /* TODO: in AP mode, do something to make this take effect */
2104 }
2105
2106 if (changes & BSS_CHANGED_BSSID) {
2107 IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
2108
2109 /*
2110 * If there is currently a HW scan going on in the
2111 * background then we need to cancel it else the RXON
2112 * below/in post_associate will fail.
2113 */
2d0ddec5 2114 if (iwl_scan_cancel_timeout(priv, 100)) {
d7129e19 2115 IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
2d0ddec5
JB
2116 IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
2117 mutex_unlock(&priv->mutex);
2118 return;
2119 }
2d0ddec5 2120
d7129e19
JB
2121 /* mac80211 only sets assoc when in STATION mode */
2122 if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
2123 bss_conf->assoc) {
2124 memcpy(priv->staging_rxon.bssid_addr,
2125 bss_conf->bssid, ETH_ALEN);
2d0ddec5 2126
d7129e19
JB
2127 /* currently needed in a few places */
2128 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
2129 } else {
2130 priv->staging_rxon.filter_flags &=
2131 ~RXON_FILTER_ASSOC_MSK;
2d0ddec5 2132 }
d7129e19 2133
2d0ddec5
JB
2134 }
2135
d7129e19
JB
2136 /*
2137 * This needs to be after setting the BSSID in case
2138 * mac80211 decides to do both changes at once because
2139 * it will invoke post_associate.
2140 */
2d0ddec5
JB
2141 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
2142 changes & BSS_CHANGED_BEACON) {
2143 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
2144
2145 if (beacon)
2146 iwl_mac_beacon_update(hw, beacon);
2147 }
2148
5bbe233b
AK
2149 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
2150 IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
2151 bss_conf->use_short_preamble);
2152 if (bss_conf->use_short_preamble)
2153 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2154 else
2155 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2156 }
2157
2158 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
2159 IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
2160 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
2161 priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
2162 else
2163 priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
2164 }
2165
d7129e19
JB
2166 if (changes & BSS_CHANGED_BASIC_RATES) {
2167 /* XXX use this information
2168 *
2169 * To do that, remove code from iwl_set_rate() and put something
2170 * like this here:
2171 *
2172 if (A-band)
2173 priv->staging_rxon.ofdm_basic_rates =
2174 bss_conf->basic_rates;
2175 else
2176 priv->staging_rxon.ofdm_basic_rates =
2177 bss_conf->basic_rates >> 4;
2178 priv->staging_rxon.cck_basic_rates =
2179 bss_conf->basic_rates & 0xF;
2180 */
2181 }
2182
5bbe233b
AK
2183 if (changes & BSS_CHANGED_HT) {
2184 iwl_ht_conf(priv, bss_conf);
45823531
AK
2185
2186 if (priv->cfg->ops->hcmd->set_rxon_chain)
2187 priv->cfg->ops->hcmd->set_rxon_chain(priv);
5bbe233b
AK
2188 }
2189
2190 if (changes & BSS_CHANGED_ASSOC) {
2191 IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
5bbe233b
AK
2192 if (bss_conf->assoc) {
2193 priv->assoc_id = bss_conf->aid;
2194 priv->beacon_int = bss_conf->beacon_int;
5bbe233b
AK
2195 priv->timestamp = bss_conf->timestamp;
2196 priv->assoc_capability = bss_conf->assoc_capability;
2197
e932a609
JB
2198 iwl_led_associate(priv);
2199
d7129e19
JB
2200 /*
 2201 * We have just associated; don't start a scan too early,
 2202 * leave time for the EAPOL exchange to complete.
2203 *
2204 * XXX: do this in mac80211
5bbe233b
AK
2205 */
2206 priv->next_scan_jiffies = jiffies +
2207 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
d7129e19
JB
2208 if (!iwl_is_rfkill(priv))
2209 priv->cfg->ops->lib->post_associate(priv);
c91c3efc
AK
2210 } else
2211 iwl_set_no_assoc(priv);
d7129e19
JB
2212 }
2213
2214 if (changes && iwl_is_associated(priv) && priv->assoc_id) {
2215 IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
2216 changes);
2217 ret = iwl_send_rxon_assoc(priv);
2218 if (!ret) {
2219 /* Sync active_rxon with latest change. */
2220 memcpy((void *)&priv->active_rxon,
2221 &priv->staging_rxon,
2222 sizeof(struct iwl_rxon_cmd));
5bbe233b 2223 }
5bbe233b 2224 }
d7129e19 2225
c91c3efc
AK
2226 if (changes & BSS_CHANGED_BEACON_ENABLED) {
2227 if (vif->bss_conf.enable_beacon) {
2228 memcpy(priv->staging_rxon.bssid_addr,
2229 bss_conf->bssid, ETH_ALEN);
2230 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
2231 iwlcore_config_ap(priv);
2232 } else
2233 iwl_set_no_assoc(priv);
f513dfff
DH
2234 }
2235
d7129e19
JB
2236 mutex_unlock(&priv->mutex);
2237
2d0ddec5 2238 IWL_DEBUG_MAC80211(priv, "leave\n");
5bbe233b
AK
2239}
2240EXPORT_SYMBOL(iwl_bss_info_changed);
2241
9944b938
AK
2242int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
2243{
2244 struct iwl_priv *priv = hw->priv;
2245 unsigned long flags;
2246 __le64 timestamp;
2247
2248 IWL_DEBUG_MAC80211(priv, "enter\n");
2249
2250 if (!iwl_is_ready_rf(priv)) {
2251 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
2252 return -EIO;
2253 }
2254
9944b938
AK
2255 spin_lock_irqsave(&priv->lock, flags);
2256
2257 if (priv->ibss_beacon)
2258 dev_kfree_skb(priv->ibss_beacon);
2259
2260 priv->ibss_beacon = skb;
2261
2262 priv->assoc_id = 0;
2263 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
2264 priv->timestamp = le64_to_cpu(timestamp);
2265
2266 IWL_DEBUG_MAC80211(priv, "leave\n");
2267 spin_unlock_irqrestore(&priv->lock, flags);
2268
2269 iwl_reset_qos(priv);
2270
2271 priv->cfg->ops->lib->post_associate(priv);
2272
2273
2274 return 0;
2275}
2276EXPORT_SYMBOL(iwl_mac_beacon_update);
2277
727882d6
AK
2278int iwl_set_mode(struct iwl_priv *priv, int mode)
2279{
2280 if (mode == NL80211_IFTYPE_ADHOC) {
2281 const struct iwl_channel_info *ch_info;
2282
2283 ch_info = iwl_get_channel_info(priv,
2284 priv->band,
2285 le16_to_cpu(priv->staging_rxon.channel));
2286
2287 if (!ch_info || !is_channel_ibss(ch_info)) {
2288 IWL_ERR(priv, "channel %d not IBSS channel\n",
2289 le16_to_cpu(priv->staging_rxon.channel));
2290 return -EINVAL;
2291 }
2292 }
2293
2294 iwl_connection_init_rx_config(priv, mode);
2295
2296 if (priv->cfg->ops->hcmd->set_rxon_chain)
2297 priv->cfg->ops->hcmd->set_rxon_chain(priv);
2298
2299 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2300
c587de0b 2301 iwl_clear_stations_table(priv);
727882d6
AK
2302
 2303 /* don't commit rxon if rf-kill is on */
2304 if (!iwl_is_ready_rf(priv))
2305 return -EAGAIN;
2306
727882d6
AK
2307 iwlcore_commit_rxon(priv);
2308
2309 return 0;
2310}
2311EXPORT_SYMBOL(iwl_set_mode);
2312
cbb6ab94 2313int iwl_mac_add_interface(struct ieee80211_hw *hw,
1ed32e4f 2314 struct ieee80211_vif *vif)
cbb6ab94
AK
2315{
2316 struct iwl_priv *priv = hw->priv;
47e28f41 2317 int err = 0;
cbb6ab94 2318
1ed32e4f 2319 IWL_DEBUG_MAC80211(priv, "enter: type %d\n", vif->type);
cbb6ab94 2320
47e28f41
JB
2321 mutex_lock(&priv->mutex);
2322
cbb6ab94
AK
2323 if (priv->vif) {
2324 IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
47e28f41
JB
2325 err = -EOPNOTSUPP;
2326 goto out;
cbb6ab94
AK
2327 }
2328
1ed32e4f
JB
2329 priv->vif = vif;
2330 priv->iw_mode = vif->type;
cbb6ab94 2331
1ed32e4f
JB
2332 if (vif->addr) {
2333 IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr);
2334 memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
cbb6ab94
AK
2335 }
2336
1ed32e4f 2337 if (iwl_set_mode(priv, vif->type) == -EAGAIN)
cbb6ab94
AK
2338 /* we are not ready, will run again when ready */
2339 set_bit(STATUS_MODE_PENDING, &priv->status);
2340
47e28f41 2341 out:
cbb6ab94
AK
2342 mutex_unlock(&priv->mutex);
2343
2344 IWL_DEBUG_MAC80211(priv, "leave\n");
47e28f41 2345 return err;
cbb6ab94
AK
2346}
2347EXPORT_SYMBOL(iwl_mac_add_interface);
2348
d8052319 2349void iwl_mac_remove_interface(struct ieee80211_hw *hw,
1ed32e4f 2350 struct ieee80211_vif *vif)
d8052319
AK
2351{
2352 struct iwl_priv *priv = hw->priv;
2353
2354 IWL_DEBUG_MAC80211(priv, "enter\n");
2355
2356 mutex_lock(&priv->mutex);
2357
2358 if (iwl_is_ready_rf(priv)) {
2359 iwl_scan_cancel_timeout(priv, 100);
2360 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2361 iwlcore_commit_rxon(priv);
2362 }
1ed32e4f 2363 if (priv->vif == vif) {
d8052319
AK
2364 priv->vif = NULL;
2365 memset(priv->bssid, 0, ETH_ALEN);
2366 }
2367 mutex_unlock(&priv->mutex);
2368
2369 IWL_DEBUG_MAC80211(priv, "leave\n");
2370
2371}
2372EXPORT_SYMBOL(iwl_mac_remove_interface);
2373
4808368d
AK
2374/**
2375 * iwl_mac_config - mac80211 config callback
2376 *
2377 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
2378 * be set inappropriately and the driver currently sets the hardware up to
2379 * use it whenever needed.
2380 */
2381int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2382{
2383 struct iwl_priv *priv = hw->priv;
2384 const struct iwl_channel_info *ch_info;
2385 struct ieee80211_conf *conf = &hw->conf;
fad95bf5 2386 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
4808368d
AK
2387 unsigned long flags = 0;
2388 int ret = 0;
2389 u16 ch;
2390 int scan_active = 0;
2391
2392 mutex_lock(&priv->mutex);
2393
4808368d
AK
2394 IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
2395 conf->channel->hw_value, changed);
2396
2397 if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
2398 test_bit(STATUS_SCANNING, &priv->status))) {
2399 scan_active = 1;
2400 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
2401 }
2402
ba37a3d0
JB
2403 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
2404 IEEE80211_CONF_CHANGE_CHANNEL)) {
2405 /* mac80211 uses static for non-HT which is what we want */
2406 priv->current_ht_config.smps = conf->smps_mode;
2407
2408 /*
2409 * Recalculate chain counts.
2410 *
2411 * If monitor mode is enabled then mac80211 will
2412 * set up the SM PS mode to OFF if an HT channel is
2413 * configured.
2414 */
2415 if (priv->cfg->ops->hcmd->set_rxon_chain)
2416 priv->cfg->ops->hcmd->set_rxon_chain(priv);
2417 }
4808368d
AK
2418
 2419 /* during scanning, mac80211 will delay channel setting until
 2420 * the scan finishes, with changed = 0
2421 */
2422 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
2423 if (scan_active)
2424 goto set_ch_out;
2425
2426 ch = ieee80211_frequency_to_channel(conf->channel->center_freq);
2427 ch_info = iwl_get_channel_info(priv, conf->channel->band, ch);
2428 if (!is_channel_valid(ch_info)) {
2429 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
2430 ret = -EINVAL;
2431 goto set_ch_out;
2432 }
2433
4808368d
AK
2434 spin_lock_irqsave(&priv->lock, flags);
2435
28bd723b
DH
2436 /* Configure HT40 channels */
2437 ht_conf->is_ht = conf_is_ht(conf);
2438 if (ht_conf->is_ht) {
2439 if (conf_is_ht40_minus(conf)) {
2440 ht_conf->extension_chan_offset =
2441 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
c812ee24 2442 ht_conf->is_40mhz = true;
28bd723b
DH
2443 } else if (conf_is_ht40_plus(conf)) {
2444 ht_conf->extension_chan_offset =
2445 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
c812ee24 2446 ht_conf->is_40mhz = true;
28bd723b
DH
2447 } else {
2448 ht_conf->extension_chan_offset =
2449 IEEE80211_HT_PARAM_CHA_SEC_NONE;
c812ee24 2450 ht_conf->is_40mhz = false;
28bd723b
DH
2451 }
2452 } else
c812ee24 2453 ht_conf->is_40mhz = false;
28bd723b
DH
2454 /* Default to no protection. Protection mode will later be set
2455 * from BSS config in iwl_ht_conf */
2456 ht_conf->ht_protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
4808368d
AK
2457
 2458 /* if we are switching from HT to 2.4 GHz, clear flags
 2459 * from any HT-related info since 2.4 GHz does not
 2460 * support HT */
2461 if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
2462 priv->staging_rxon.flags = 0;
2463
2464 iwl_set_rxon_channel(priv, conf->channel);
5e2f75b8 2465 iwl_set_rxon_ht(priv, ht_conf);
4808368d
AK
2466
2467 iwl_set_flags_for_band(priv, conf->channel->band);
2468 spin_unlock_irqrestore(&priv->lock, flags);
0924e519
WYG
2469 if (iwl_is_associated(priv) &&
2470 (le16_to_cpu(priv->active_rxon.channel) != ch) &&
2471 priv->cfg->ops->lib->set_channel_switch) {
2472 iwl_set_rate(priv);
2473 /*
2474 * at this point, staging_rxon has the
2475 * configuration for channel switch
2476 */
2477 ret = priv->cfg->ops->lib->set_channel_switch(priv,
2478 ch);
2479 if (!ret) {
2480 iwl_print_rx_config_cmd(priv);
2481 goto out;
2482 }
2483 priv->switch_rxon.switch_in_progress = false;
2484 }
4808368d
AK
2485 set_ch_out:
2486 /* The list of supported rates and rate mask can be different
2487 * for each band; since the band may have changed, reset
2488 * the rate mask to what mac80211 lists */
2489 iwl_set_rate(priv);
2490 }
2491
78f5fb7f
JB
2492 if (changed & (IEEE80211_CONF_CHANGE_PS |
2493 IEEE80211_CONF_CHANGE_IDLE)) {
e312c24c 2494 ret = iwl_power_update_mode(priv, false);
4808368d 2495 if (ret)
e312c24c 2496 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
4808368d
AK
2497 }
2498
2499 if (changed & IEEE80211_CONF_CHANGE_POWER) {
2500 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
2501 priv->tx_power_user_lmt, conf->power_level);
2502
2503 iwl_set_tx_power(priv, conf->power_level, false);
2504 }
2505
0cf4c01e
MA
2506 if (!iwl_is_ready(priv)) {
2507 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2508 goto out;
2509 }
2510
4808368d
AK
2511 if (scan_active)
2512 goto out;
2513
2514 if (memcmp(&priv->active_rxon,
2515 &priv->staging_rxon, sizeof(priv->staging_rxon)))
2516 iwlcore_commit_rxon(priv);
2517 else
2518 IWL_DEBUG_INFO(priv, "Not re-sending same RXON configuration.\n");
2519
2520
2521out:
2522 IWL_DEBUG_MAC80211(priv, "leave\n");
2523 mutex_unlock(&priv->mutex);
2524 return ret;
2525}
2526EXPORT_SYMBOL(iwl_mac_config);
2527
bd564261
AK
2528void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
2529{
2530 struct iwl_priv *priv = hw->priv;
2531 unsigned long flags;
2532
2533 mutex_lock(&priv->mutex);
2534 IWL_DEBUG_MAC80211(priv, "enter\n");
2535
2536 spin_lock_irqsave(&priv->lock, flags);
fad95bf5 2537 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
bd564261
AK
2538 spin_unlock_irqrestore(&priv->lock, flags);
2539
2540 iwl_reset_qos(priv);
2541
2542 spin_lock_irqsave(&priv->lock, flags);
2543 priv->assoc_id = 0;
2544 priv->assoc_capability = 0;
2545 priv->assoc_station_added = 0;
2546
2547 /* new association get rid of ibss beacon skb */
2548 if (priv->ibss_beacon)
2549 dev_kfree_skb(priv->ibss_beacon);
2550
2551 priv->ibss_beacon = NULL;
2552
57c4d7b4 2553 priv->beacon_int = priv->vif->bss_conf.beacon_int;
bd564261 2554 priv->timestamp = 0;
bd564261
AK
2555
2556 spin_unlock_irqrestore(&priv->lock, flags);
2557
2558 if (!iwl_is_ready_rf(priv)) {
2559 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2560 mutex_unlock(&priv->mutex);
2561 return;
2562 }
2563
2564 /* we are restarting association process
2565 * clear RXON_FILTER_ASSOC_MSK bit
2566 */
b4665df4
JB
2567 iwl_scan_cancel_timeout(priv, 100);
2568 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2569 iwlcore_commit_rxon(priv);
bd564261
AK
2570
2571 iwl_set_rate(priv);
2572
2573 mutex_unlock(&priv->mutex);
2574
2575 IWL_DEBUG_MAC80211(priv, "leave\n");
2576}
2577EXPORT_SYMBOL(iwl_mac_reset_tsf);
2578
88804e2b
WYG
2579int iwl_alloc_txq_mem(struct iwl_priv *priv)
2580{
2581 if (!priv->txq)
2582 priv->txq = kzalloc(
2583 sizeof(struct iwl_tx_queue) * priv->cfg->num_of_queues,
2584 GFP_KERNEL);
2585 if (!priv->txq) {
 2586 IWL_ERR(priv, "Not enough memory for txq\n");
2587 return -ENOMEM;
2588 }
2589 return 0;
2590}
2591EXPORT_SYMBOL(iwl_alloc_txq_mem);
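/*
 * Design note: the allocation above multiplies the element size by
 * priv->cfg->num_of_queues by hand; the equivalent, overflow-checked
 * idiom would be
 *
 *	priv->txq = kcalloc(priv->cfg->num_of_queues,
 *			    sizeof(struct iwl_tx_queue), GFP_KERNEL);
 *
 * which behaves the same here since num_of_queues is a small,
 * driver-controlled constant.
 */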
2592
2593void iwl_free_txq_mem(struct iwl_priv *priv)
2594{
2595 kfree(priv->txq);
2596 priv->txq = NULL;
2597}
2598EXPORT_SYMBOL(iwl_free_txq_mem);
2599
1933ac4d
WYG
2600int iwl_send_wimax_coex(struct iwl_priv *priv)
2601{
 2602 struct iwl_wimax_coex_cmd coex_cmd = {}; /* zeroed so the |= below starts from clean flags */
2603
2604 if (priv->cfg->support_wimax_coexist) {
2605 /* UnMask wake up src at associated sleep */
2606 coex_cmd.flags |= COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
2607
2608 /* UnMask wake up src at unassociated sleep */
2609 coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
2610 memcpy(coex_cmd.sta_prio, cu_priorities,
2611 sizeof(struct iwl_wimax_coex_event_entry) *
2612 COEX_NUM_OF_EVENTS);
2613
2614 /* enabling the coexistence feature */
2615 coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
2616
2617 /* enabling the priorities tables */
2618 coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
2619 } else {
2620 /* coexistence is disabled */
2621 memset(&coex_cmd, 0, sizeof(coex_cmd));
2622 }
2623 return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
2624 sizeof(coex_cmd), &coex_cmd);
2625}
2626EXPORT_SYMBOL(iwl_send_wimax_coex);
2627
20594eb0
WYG
2628#ifdef CONFIG_IWLWIFI_DEBUGFS
2629
2630#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
2631
2632void iwl_reset_traffic_log(struct iwl_priv *priv)
2633{
2634 priv->tx_traffic_idx = 0;
2635 priv->rx_traffic_idx = 0;
2636 if (priv->tx_traffic)
2637 memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
2638 if (priv->rx_traffic)
2639 memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
2640}
2641
2642int iwl_alloc_traffic_mem(struct iwl_priv *priv)
2643{
2644 u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
2645
2646 if (iwl_debug_level & IWL_DL_TX) {
2647 if (!priv->tx_traffic) {
2648 priv->tx_traffic =
2649 kzalloc(traffic_size, GFP_KERNEL);
2650 if (!priv->tx_traffic)
2651 return -ENOMEM;
2652 }
2653 }
2654 if (iwl_debug_level & IWL_DL_RX) {
2655 if (!priv->rx_traffic) {
2656 priv->rx_traffic =
2657 kzalloc(traffic_size, GFP_KERNEL);
2658 if (!priv->rx_traffic)
2659 return -ENOMEM;
2660 }
2661 }
2662 iwl_reset_traffic_log(priv);
2663 return 0;
2664}
2665EXPORT_SYMBOL(iwl_alloc_traffic_mem);
2666
2667void iwl_free_traffic_mem(struct iwl_priv *priv)
2668{
2669 kfree(priv->tx_traffic);
2670 priv->tx_traffic = NULL;
2671
2672 kfree(priv->rx_traffic);
2673 priv->rx_traffic = NULL;
2674}
2675EXPORT_SYMBOL(iwl_free_traffic_mem);
2676
2677void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
2678 u16 length, struct ieee80211_hdr *header)
2679{
2680 __le16 fc;
2681 u16 len;
2682
2683 if (likely(!(iwl_debug_level & IWL_DL_TX)))
2684 return;
2685
2686 if (!priv->tx_traffic)
2687 return;
2688
2689 fc = header->frame_control;
2690 if (ieee80211_is_data(fc)) {
2691 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
2692 ? IWL_TRAFFIC_ENTRY_SIZE : length;
2693 memcpy((priv->tx_traffic +
2694 (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
2695 header, len);
2696 priv->tx_traffic_idx =
2697 (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
2698 }
2699}
2700EXPORT_SYMBOL(iwl_dbg_log_tx_data_frame);
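/*
 * The TX logger above (and its RX counterpart below) implements a
 * simple ring buffer: each data frame header is copied into a fixed
 * IWL_TRAFFIC_ENTRY_SIZE slot and the index wraps with
 * "% IWL_TRAFFIC_ENTRIES", so once the log is full the oldest entries
 * are silently overwritten; e.g. if there were 16 entries, the 17th
 * logged frame would land back in slot 0.
 */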
2701
2702void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
2703 u16 length, struct ieee80211_hdr *header)
2704{
2705 __le16 fc;
2706 u16 len;
2707
2708 if (likely(!(iwl_debug_level & IWL_DL_RX)))
2709 return;
2710
2711 if (!priv->rx_traffic)
2712 return;
2713
2714 fc = header->frame_control;
2715 if (ieee80211_is_data(fc)) {
2716 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
2717 ? IWL_TRAFFIC_ENTRY_SIZE : length;
2718 memcpy((priv->rx_traffic +
2719 (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
2720 header, len);
2721 priv->rx_traffic_idx =
2722 (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
2723 }
2724}
2725EXPORT_SYMBOL(iwl_dbg_log_rx_data_frame);
22fdf3c9
WYG
2726
2727const char *get_mgmt_string(int cmd)
2728{
2729 switch (cmd) {
2730 IWL_CMD(MANAGEMENT_ASSOC_REQ);
2731 IWL_CMD(MANAGEMENT_ASSOC_RESP);
2732 IWL_CMD(MANAGEMENT_REASSOC_REQ);
2733 IWL_CMD(MANAGEMENT_REASSOC_RESP);
2734 IWL_CMD(MANAGEMENT_PROBE_REQ);
2735 IWL_CMD(MANAGEMENT_PROBE_RESP);
2736 IWL_CMD(MANAGEMENT_BEACON);
2737 IWL_CMD(MANAGEMENT_ATIM);
2738 IWL_CMD(MANAGEMENT_DISASSOC);
2739 IWL_CMD(MANAGEMENT_AUTH);
2740 IWL_CMD(MANAGEMENT_DEAUTH);
2741 IWL_CMD(MANAGEMENT_ACTION);
2742 default:
2743 return "UNKNOWN";
2744
2745 }
2746}
2747
2748const char *get_ctrl_string(int cmd)
2749{
2750 switch (cmd) {
2751 IWL_CMD(CONTROL_BACK_REQ);
2752 IWL_CMD(CONTROL_BACK);
2753 IWL_CMD(CONTROL_PSPOLL);
2754 IWL_CMD(CONTROL_RTS);
2755 IWL_CMD(CONTROL_CTS);
2756 IWL_CMD(CONTROL_ACK);
2757 IWL_CMD(CONTROL_CFEND);
2758 IWL_CMD(CONTROL_CFENDACK);
2759 default:
2760 return "UNKNOWN";
2761
2762 }
2763}
2764
7163b8a4 2765void iwl_clear_traffic_stats(struct iwl_priv *priv)
22fdf3c9
WYG
2766{
2767 memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
22fdf3c9 2768 memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
7163b8a4 2769 priv->led_tpt = 0;
22fdf3c9
WYG
2770}
2771
2772/*
 2773 * If CONFIG_IWLWIFI_DEBUGFS is defined, iwl_update_stats() will
 2774 * record all MGMT, CTRL and DATA packets for both the TX and RX paths.
 2775 * Use debugfs to display the tx/rx statistics.
 2776 * If CONFIG_IWLWIFI_DEBUGFS is not defined, no MGMT and CTRL
 2777 * information will be recorded, but DATA packets are still counted
 2778 * because iwl_led.c needs to control the LED blinking based on
 2779 * the amount of tx and rx data.
2780 *
2781 */
2782void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
2783{
2784 struct traffic_stats *stats;
2785
2786 if (is_tx)
2787 stats = &priv->tx_stats;
2788 else
2789 stats = &priv->rx_stats;
2790
2791 if (ieee80211_is_mgmt(fc)) {
2792 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
2793 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
2794 stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
2795 break;
2796 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
2797 stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
2798 break;
2799 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
2800 stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
2801 break;
2802 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
2803 stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
2804 break;
2805 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
2806 stats->mgmt[MANAGEMENT_PROBE_REQ]++;
2807 break;
2808 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
2809 stats->mgmt[MANAGEMENT_PROBE_RESP]++;
2810 break;
2811 case cpu_to_le16(IEEE80211_STYPE_BEACON):
2812 stats->mgmt[MANAGEMENT_BEACON]++;
2813 break;
2814 case cpu_to_le16(IEEE80211_STYPE_ATIM):
2815 stats->mgmt[MANAGEMENT_ATIM]++;
2816 break;
2817 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
2818 stats->mgmt[MANAGEMENT_DISASSOC]++;
2819 break;
2820 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2821 stats->mgmt[MANAGEMENT_AUTH]++;
2822 break;
2823 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2824 stats->mgmt[MANAGEMENT_DEAUTH]++;
2825 break;
2826 case cpu_to_le16(IEEE80211_STYPE_ACTION):
2827 stats->mgmt[MANAGEMENT_ACTION]++;
2828 break;
2829 }
2830 } else if (ieee80211_is_ctl(fc)) {
2831 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
2832 case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
2833 stats->ctrl[CONTROL_BACK_REQ]++;
2834 break;
2835 case cpu_to_le16(IEEE80211_STYPE_BACK):
2836 stats->ctrl[CONTROL_BACK]++;
2837 break;
2838 case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
2839 stats->ctrl[CONTROL_PSPOLL]++;
2840 break;
2841 case cpu_to_le16(IEEE80211_STYPE_RTS):
2842 stats->ctrl[CONTROL_RTS]++;
2843 break;
2844 case cpu_to_le16(IEEE80211_STYPE_CTS):
2845 stats->ctrl[CONTROL_CTS]++;
2846 break;
2847 case cpu_to_le16(IEEE80211_STYPE_ACK):
2848 stats->ctrl[CONTROL_ACK]++;
2849 break;
2850 case cpu_to_le16(IEEE80211_STYPE_CFEND):
2851 stats->ctrl[CONTROL_CFEND]++;
2852 break;
2853 case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
2854 stats->ctrl[CONTROL_CFENDACK]++;
2855 break;
2856 }
2857 } else {
2858 /* data */
2859 stats->data_cnt++;
2860 stats->data_bytes += len;
2861 }
d5f4cf71 2862 iwl_leds_background(priv);
22fdf3c9
WYG
2863}
2864EXPORT_SYMBOL(iwl_update_stats);
20594eb0
WYG
2865#endif
2866
696bdee3
WYG
 2867static const char *get_csr_string(int cmd)
2868{
2869 switch (cmd) {
2870 IWL_CMD(CSR_HW_IF_CONFIG_REG);
2871 IWL_CMD(CSR_INT_COALESCING);
2872 IWL_CMD(CSR_INT);
2873 IWL_CMD(CSR_INT_MASK);
2874 IWL_CMD(CSR_FH_INT_STATUS);
2875 IWL_CMD(CSR_GPIO_IN);
2876 IWL_CMD(CSR_RESET);
2877 IWL_CMD(CSR_GP_CNTRL);
2878 IWL_CMD(CSR_HW_REV);
2879 IWL_CMD(CSR_EEPROM_REG);
2880 IWL_CMD(CSR_EEPROM_GP);
2881 IWL_CMD(CSR_OTP_GP_REG);
2882 IWL_CMD(CSR_GIO_REG);
2883 IWL_CMD(CSR_GP_UCODE_REG);
2884 IWL_CMD(CSR_GP_DRIVER_REG);
2885 IWL_CMD(CSR_UCODE_DRV_GP1);
2886 IWL_CMD(CSR_UCODE_DRV_GP2);
2887 IWL_CMD(CSR_LED_REG);
2888 IWL_CMD(CSR_DRAM_INT_TBL_REG);
2889 IWL_CMD(CSR_GIO_CHICKEN_BITS);
2890 IWL_CMD(CSR_ANA_PLL_CFG);
2891 IWL_CMD(CSR_HW_REV_WA_REG);
2892 IWL_CMD(CSR_DBG_HPET_MEM_REG);
2893 default:
2894 return "UNKNOWN";
2895
2896 }
2897}
2898
2899void iwl_dump_csr(struct iwl_priv *priv)
2900{
2901 int i;
2902 u32 csr_tbl[] = {
2903 CSR_HW_IF_CONFIG_REG,
2904 CSR_INT_COALESCING,
2905 CSR_INT,
2906 CSR_INT_MASK,
2907 CSR_FH_INT_STATUS,
2908 CSR_GPIO_IN,
2909 CSR_RESET,
2910 CSR_GP_CNTRL,
2911 CSR_HW_REV,
2912 CSR_EEPROM_REG,
2913 CSR_EEPROM_GP,
2914 CSR_OTP_GP_REG,
2915 CSR_GIO_REG,
2916 CSR_GP_UCODE_REG,
2917 CSR_GP_DRIVER_REG,
2918 CSR_UCODE_DRV_GP1,
2919 CSR_UCODE_DRV_GP2,
2920 CSR_LED_REG,
2921 CSR_DRAM_INT_TBL_REG,
2922 CSR_GIO_CHICKEN_BITS,
2923 CSR_ANA_PLL_CFG,
2924 CSR_HW_REV_WA_REG,
2925 CSR_DBG_HPET_MEM_REG
2926 };
2927 IWL_ERR(priv, "CSR values:\n");
2928 IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
2929 "CSR_INT_PERIODIC_REG)\n");
2930 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
2931 IWL_ERR(priv, " %25s: 0X%08x\n",
2932 get_csr_string(csr_tbl[i]),
2933 iwl_read32(priv, csr_tbl[i]));
2934 }
2935}
2936EXPORT_SYMBOL(iwl_dump_csr);
2937
1b3eb823
WYG
 2938static const char *get_fh_string(int cmd)
2939{
2940 switch (cmd) {
2941 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
2942 IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
2943 IWL_CMD(FH_RSCSR_CHNL0_WPTR);
2944 IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
2945 IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
2946 IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
2947 IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
2948 IWL_CMD(FH_TSSR_TX_STATUS_REG);
2949 IWL_CMD(FH_TSSR_TX_ERROR_REG);
2950 default:
2951 return "UNKNOWN";
2952
2953 }
2954}
2955
2956int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
2957{
2958 int i;
2959#ifdef CONFIG_IWLWIFI_DEBUG
2960 int pos = 0;
2961 size_t bufsz = 0;
2962#endif
2963 u32 fh_tbl[] = {
2964 FH_RSCSR_CHNL0_STTS_WPTR_REG,
2965 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
2966 FH_RSCSR_CHNL0_WPTR,
2967 FH_MEM_RCSR_CHNL0_CONFIG_REG,
2968 FH_MEM_RSSR_SHARED_CTRL_REG,
2969 FH_MEM_RSSR_RX_STATUS_REG,
2970 FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
2971 FH_TSSR_TX_STATUS_REG,
2972 FH_TSSR_TX_ERROR_REG
2973 };
2974#ifdef CONFIG_IWLWIFI_DEBUG
2975 if (display) {
2976 bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
2977 *buf = kmalloc(bufsz, GFP_KERNEL);
2978 if (!*buf)
2979 return -ENOMEM;
2980 pos += scnprintf(*buf + pos, bufsz - pos,
2981 "FH register values:\n");
2982 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
2983 pos += scnprintf(*buf + pos, bufsz - pos,
2984 " %34s: 0X%08x\n",
2985 get_fh_string(fh_tbl[i]),
2986 iwl_read_direct32(priv, fh_tbl[i]));
2987 }
2988 return pos;
2989 }
2990#endif
2991 IWL_ERR(priv, "FH register values:\n");
2992 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
2993 IWL_ERR(priv, " %34s: 0X%08x\n",
2994 get_fh_string(fh_tbl[i]),
2995 iwl_read_direct32(priv, fh_tbl[i]));
2996 }
2997 return 0;
2998}
2999EXPORT_SYMBOL(iwl_dump_fh);
3000
a93e7973 3001static void iwl_force_rf_reset(struct iwl_priv *priv)
afbdd69a
WYG
3002{
3003 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3004 return;
3005
3006 if (!iwl_is_associated(priv)) {
3007 IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
3008 return;
3009 }
3010 /*
 3011 * There is no easy or better way to force-reset the radio;
 3012 * the only known method is switching the channel, which forces
 3013 * the radio to reset and retune.
 3014 * Use an internal short scan (single channel) operation to
 3015 * achieve this.
 3016 * The driver should reset the radio when a number of consecutive
 3017 * missed beacons, or any other uCode error condition, is detected.
3018 */
3019 IWL_DEBUG_INFO(priv, "perform radio reset.\n");
3020 iwl_internal_short_hw_scan(priv);
3021 return;
3022}
a93e7973 3023
a93e7973
WYG
3024
3025int iwl_force_reset(struct iwl_priv *priv, int mode)
3026{
8a472da4
WYG
3027 struct iwl_force_reset *force_reset;
3028
a93e7973
WYG
3029 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3030 return -EINVAL;
3031
8a472da4
WYG
3032 if (mode >= IWL_MAX_FORCE_RESET) {
3033 IWL_DEBUG_INFO(priv, "invalid reset request.\n");
3034 return -EINVAL;
3035 }
3036 force_reset = &priv->force_reset[mode];
3037 force_reset->reset_request_count++;
3038 if (force_reset->last_force_reset_jiffies &&
3039 time_after(force_reset->last_force_reset_jiffies +
3040 force_reset->reset_duration, jiffies)) {
a93e7973 3041 IWL_DEBUG_INFO(priv, "force reset rejected\n");
8a472da4 3042 force_reset->reset_reject_count++;
a93e7973
WYG
3043 return -EAGAIN;
3044 }
8a472da4
WYG
3045 force_reset->reset_success_count++;
3046 force_reset->last_force_reset_jiffies = jiffies;
a93e7973 3047 IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
a93e7973
WYG
3048 switch (mode) {
3049 case IWL_RF_RESET:
3050 iwl_force_rf_reset(priv);
3051 break;
3052 case IWL_FW_RESET:
3053 IWL_ERR(priv, "On demand firmware reload\n");
3054 /* Set the FW error flag -- cleared on iwl_down */
3055 set_bit(STATUS_FW_ERROR, &priv->status);
3056 wake_up_interruptible(&priv->wait_command_queue);
3057 /*
3058 * Keep the restart process from trying to send host
3059 * commands by clearing the INIT status bit
3060 */
3061 clear_bit(STATUS_READY, &priv->status);
3062 queue_work(priv->workqueue, &priv->restart);
3063 break;
a93e7973 3064 }
a93e7973
WYG
3065 return 0;
3066}
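/*
 * Usage sketch (callers live outside this function): the missed-beacon
 * and error-recovery paths are expected to invoke this as, for
 * instance,
 *
 *	iwl_force_reset(priv, IWL_RF_RESET);
 *
 * and escalate to IWL_FW_RESET if the radio reset does not help; the
 * per-mode force_reset[] bookkeeping above rate-limits repeated
 * requests via reset_duration. The exact call sites are an assumption
 * here and are not shown in this section.
 */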
afbdd69a 3067
6da3a13e
WYG
3068#ifdef CONFIG_PM
3069
3070int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
3071{
3072 struct iwl_priv *priv = pci_get_drvdata(pdev);
3073
3074 /*
 3075 * This function is called when the system goes into suspend state.
 3076 * mac80211 will call iwl_mac_stop() from its suspend function
 3077 * first, but since iwl_mac_stop() has no knowledge of who the caller is,
 3078 * it will not call apm_ops.stop() to stop the DMA operation.
 3079 * Call apm_ops.stop() here to make sure we stop the DMA.
3080 */
3081 priv->cfg->ops->lib->apm_ops.stop(priv);
3082
3083 pci_save_state(pdev);
3084 pci_disable_device(pdev);
3085 pci_set_power_state(pdev, PCI_D3hot);
3086
3087 return 0;
3088}
3089EXPORT_SYMBOL(iwl_pci_suspend);
3090
3091int iwl_pci_resume(struct pci_dev *pdev)
3092{
3093 struct iwl_priv *priv = pci_get_drvdata(pdev);
3094 int ret;
3095
3096 pci_set_power_state(pdev, PCI_D0);
3097 ret = pci_enable_device(pdev);
3098 if (ret)
3099 return ret;
3100 pci_restore_state(pdev);
3101 iwl_enable_interrupts(priv);
3102
3103 return 0;
3104}
3105EXPORT_SYMBOL(iwl_pci_resume);
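/*
 * Hookup sketch (an assumption, not in this file): the device drivers
 * point the legacy PCI power-management callbacks at these helpers,
 * roughly:
 *
 *	static struct pci_driver iwl_example_driver = {
 *		.name     = DRV_NAME,
 *		.id_table = iwl_hw_card_ids,
 *		.probe    = iwl_pci_probe,
 *		.remove   = __devexit_p(iwl_pci_remove),
 *	#ifdef CONFIG_PM
 *		.suspend  = iwl_pci_suspend,
 *		.resume   = iwl_pci_resume,
 *	#endif
 *	};
 *
 * The probe/remove/id_table names above are illustrative; only the
 * suspend/resume wiring is the point.
 */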
3106
3107#endif /* CONFIG_PM */