1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/init.h>
34#include <linux/pci.h>
35#include <linux/dma-mapping.h>
36#include <linux/delay.h>
37#include <linux/skbuff.h>
38#include <linux/netdevice.h>
39#include <linux/wireless.h>
40#include <linux/firmware.h>
41#include <linux/etherdevice.h>
42#include <linux/if_arp.h>
43
44#include <net/mac80211.h>
45
46#include <asm/div64.h>
47
48#include "iwl-4965.h"
49#include "iwl-helpers.h"
50
51#ifdef CONFIG_IWL4965_DEBUG
52u32 iwl4965_debug_level;
53#endif
54
55static int iwl4965_tx_queue_update_write_ptr(struct iwl4965_priv *priv,
56 struct iwl4965_tx_queue *txq);
57
58/******************************************************************************
59 *
60 * module boiler plate
61 *
62 ******************************************************************************/
63
64/* module parameters */
65static int iwl4965_param_disable_hw_scan; /* def: 0 = use 4965's h/w scan */
66static int iwl4965_param_debug; /* def: 0 = minimal debug log messages */
67static int iwl4965_param_disable; /* def: enable radio */
68static int iwl4965_param_antenna; /* def: 0 = both antennas (use diversity) */
69int iwl4965_param_hwcrypto; /* def: using software encryption */
70static int iwl4965_param_qos_enable = 1; /* def: 1 = use quality of service */
71int iwl4965_param_queues_num = IWL_MAX_NUM_QUEUES; /* def: 16 Tx queues */
72int iwl4965_param_amsdu_size_8K; /* def: 0 = disable 8K amsdu size */
73
74/*
75 * module name, copyright, version, etc.
76 * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk
77 */
78
79#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link 4965AGN driver for Linux"
80
81#ifdef CONFIG_IWL4965_DEBUG
82#define VD "d"
83#else
84#define VD
85#endif
86
87#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
88#define VS "s"
89#else
90#define VS
91#endif
92
93#define IWLWIFI_VERSION "1.2.23k" VD VS
94#define DRV_COPYRIGHT "Copyright(c) 2003-2007 Intel Corporation"
95#define DRV_VERSION IWLWIFI_VERSION
96
97/* Change firmware file name, using "-" and incrementing number,
98 * *only* when uCode interface or architecture changes so that it
99 * is not compatible with earlier drivers.
100 * This number will also appear in << 8 position of 1st dword of uCode file */
101#define IWL4965_UCODE_API "-1"
102
103MODULE_DESCRIPTION(DRV_DESCRIPTION);
104MODULE_VERSION(DRV_VERSION);
105MODULE_AUTHOR(DRV_COPYRIGHT);
106MODULE_LICENSE("GPL");
107
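/**
 * ieee80211_get_qos_ctrl - return a pointer to the QoS control field
 *
 * Returns the QoS control field of a QoS data frame header, or NULL if
 * the frame is not a QoS data frame.
 */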
108__le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr)
109{
110 u16 fc = le16_to_cpu(hdr->frame_control);
111 int hdr_len = ieee80211_get_hdrlen(fc);
112
113 if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA))
114 return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN);
115 return NULL;
116}
117
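/**
 * iwl4965_get_hw_mode - look up the ieee80211_hw_mode registered for 'mode'
 *
 * Returns NULL if 'mode' is not one of the (up to) three modes set up by
 * the driver.
 */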
118static const struct ieee80211_hw_mode *iwl4965_get_hw_mode(
119 struct iwl4965_priv *priv, int mode)
120{
121 int i;
122
123 for (i = 0; i < 3; i++)
124 if (priv->modes[i].mode == mode)
125 return &priv->modes[i];
126
127 return NULL;
128}
129
130static int iwl4965_is_empty_essid(const char *essid, int essid_len)
131{
132 /* Single white space is for Linksys APs */
133 if (essid_len == 1 && essid[0] == ' ')
134 return 1;
135
136 /* Otherwise, if the entire essid is 0, we assume it is hidden */
137 while (essid_len) {
138 essid_len--;
139 if (essid[essid_len] != '\0')
140 return 0;
141 }
142
143 return 1;
144}
145
146static const char *iwl4965_escape_essid(const char *essid, u8 essid_len)
147{
148 static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
149 const char *s = essid;
150 char *d = escaped;
151
152 if (iwl4965_is_empty_essid(essid, essid_len)) {
153 memcpy(escaped, "<hidden>", sizeof("<hidden>"));
154 return escaped;
155 }
156
157 essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE);
158 while (essid_len--) {
159 if (*s == '\0') {
160 *d++ = '\\';
161 *d++ = '0';
162 s++;
163 } else
164 *d++ = *s++;
165 }
166 *d = '\0';
167 return escaped;
168}
169
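/**
 * iwl4965_print_hex_dump - dump a buffer to the syslog
 *
 * Compiled out unless CONFIG_IWL4965_DEBUG is set, and silent unless
 * 'level' is enabled in iwl4965_debug_level.
 */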
170static void iwl4965_print_hex_dump(int level, void *p, u32 len)
171{
172#ifdef CONFIG_IWL4965_DEBUG
173 if (!(iwl4965_debug_level & level))
174 return;
175
176 print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
177 p, len, 1);
178#endif
179}
180
181/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
182 * DMA services
183 *
184 * Theory of operation
185 *
186 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
187 * of buffer descriptors, each of which points to one or more data buffers for
188 * the device to read from or fill. Driver and device exchange status of each
189 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
190 * entries in each circular buffer, to protect against confusing empty and full
191 * queue states.
192 *
193 * The device reads or writes the data in the queues via the device's several
194 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
195 *
196 * For the Tx queue, there are low mark and high mark limits. If, after queuing
197 * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped. When
198 * reclaiming packets (on the 'tx done' IRQ), if the free space becomes > high mark,
199 * the Tx queue is resumed.
200 *
201 * The 4965 operates with up to 17 queues: One receive queue, one transmit
202 * queue (#4) for sending commands to the device firmware, and 15 other
203 * Tx queues that may be mapped to prioritized Tx DMA/FIFO channels.
204 *
205 * See more detailed info in iwl-4965-hw.h.
206 ***************************************************/
207
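/**
 * iwl4965_queue_space - return the number of available slots in the queue
 *
 * Keeps a reserve of 2 entries so that a full queue is never mistaken for
 * an empty one.
 */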
208static int iwl4965_queue_space(const struct iwl4965_queue *q)
209{
210 int s = q->read_ptr - q->write_ptr;
211
212 if (q->read_ptr > q->write_ptr)
213 s -= q->n_bd;
214
215 if (s <= 0)
216 s += q->n_window;
217 /* keep some reserve to not confuse empty and full situations */
218 s -= 2;
219 if (s < 0)
220 s = 0;
221 return s;
222}
223
224/**
225 * iwl4965_queue_inc_wrap - increment queue index, wrap back to beginning
226 * @index -- current index
227 * @n_bd -- total number of entries in queue (must be power of 2)
228 */
229static inline int iwl4965_queue_inc_wrap(int index, int n_bd)
230{
231 return ++index & (n_bd - 1);
232}
233
234/**
235 * iwl4965_queue_dec_wrap - decrement queue index, wrap back to end
236 * @index -- current index
237 * @n_bd -- total number of entries in queue (must be power of 2)
238 */
239static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
240{
241 return --index & (n_bd - 1);
242}
243
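/**
 * x2_queue_used - check whether index 'i' lies in the used region of the queue
 */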
244static inline int x2_queue_used(const struct iwl4965_queue *q, int i)
245{
246 return q->write_ptr > q->read_ptr ?
247 (i >= q->read_ptr && i < q->write_ptr) :
248 !(i < q->read_ptr && i >= q->write_ptr);
249}
250
251static inline u8 get_cmd_index(struct iwl4965_queue *q, u32 index, int is_huge)
252{
253 /* This is for scan command, the big buffer at end of command array */
254 if (is_huge)
255 return q->n_window; /* must be power of 2 */
256
257 /* Otherwise, use normal size buffers */
258 return index & (q->n_window - 1);
259}
260
261/**
262 * iwl4965_queue_init - Initialize queue's high/low-water and read/write indexes
263 */
264static int iwl4965_queue_init(struct iwl4965_priv *priv, struct iwl4965_queue *q,
265 int count, int slots_num, u32 id)
266{
267 q->n_bd = count;
268 q->n_window = slots_num;
269 q->id = id;
270
271 /* count must be power-of-two size, otherwise iwl4965_queue_inc_wrap
272 * and iwl4965_queue_dec_wrap are broken. */
273 BUG_ON(!is_power_of_2(count));
274
275 /* slots_num must be power-of-two size, otherwise
276 * get_cmd_index is broken. */
277 BUG_ON(!is_power_of_2(slots_num));
278
279 q->low_mark = q->n_window / 4;
280 if (q->low_mark < 4)
281 q->low_mark = 4;
282
283 q->high_mark = q->n_window / 8;
284 if (q->high_mark < 2)
285 q->high_mark = 2;
286
287 q->write_ptr = q->read_ptr = 0;
288
289 return 0;
290}
291
292/**
293 * iwl4965_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
294 */
295static int iwl4965_tx_queue_alloc(struct iwl4965_priv *priv,
296 struct iwl4965_tx_queue *txq, u32 id)
297{
298 struct pci_dev *dev = priv->pci_dev;
299
300 /* Driver private data, only for Tx (not command) queues,
301 * not shared with device. */
302 if (id != IWL_CMD_QUEUE_NUM) {
303 txq->txb = kmalloc(sizeof(txq->txb[0]) *
304 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
305 if (!txq->txb) {
306 IWL_ERROR("kmalloc for auxiliary BD "
307 "structures failed\n");
308 goto error;
309 }
310 } else
311 txq->txb = NULL;
312
313 /* Circular buffer of transmit frame descriptors (TFDs),
314 * shared with device */
315 txq->bd = pci_alloc_consistent(dev,
316 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
317 &txq->q.dma_addr);
318
319 if (!txq->bd) {
320 IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
321 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
322 goto error;
323 }
324 txq->q.id = id;
325
326 return 0;
327
328 error:
329 if (txq->txb) {
330 kfree(txq->txb);
331 txq->txb = NULL;
332 }
333
334 return -ENOMEM;
335}
336
337/**
338 * iwl4965_tx_queue_init - Allocate and initialize one tx/cmd queue
339 */
340int iwl4965_tx_queue_init(struct iwl4965_priv *priv,
341 struct iwl4965_tx_queue *txq, int slots_num, u32 txq_id)
342{
343 struct pci_dev *dev = priv->pci_dev;
344 int len;
345 int rc = 0;
346
347 /*
348 * Alloc buffer array for commands (Tx or other types of commands).
349 * For the command queue (#4), allocate command space + one big
350 * command for scan, since scan command is very huge; the system will
351 * not have two scans at the same time, so only one is needed.
352 * For normal Tx queues (all other queues), no super-size command
353 * space is needed.
354 */
355 len = sizeof(struct iwl4965_cmd) * slots_num;
356 if (txq_id == IWL_CMD_QUEUE_NUM)
357 len += IWL_MAX_SCAN_SIZE;
358 txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
359 if (!txq->cmd)
360 return -ENOMEM;
361
362 /* Alloc driver data array and TFD circular buffer */
363 rc = iwl4965_tx_queue_alloc(priv, txq, txq_id);
364 if (rc) {
365 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
366
367 return -ENOMEM;
368 }
369 txq->need_update = 0;
370
371 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
372 * iwl4965_queue_inc_wrap and iwl4965_queue_dec_wrap are broken. */
373 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
374
375 /* Initialize queue's high/low-water marks, and head/tail indexes */
376 iwl4965_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
377
378 /* Tell device where to find queue */
379 iwl4965_hw_tx_queue_init(priv, txq);
380
381 return 0;
382}
383
384/**
385 * iwl4965_tx_queue_free - Deallocate DMA queue.
386 * @txq: Transmit queue to deallocate.
387 *
388 * Empty queue by removing and destroying all BD's.
389 * Free all buffers.
390 * 0-fill, but do not free "txq" descriptor structure.
b481de9c 391 */
392void iwl4965_tx_queue_free(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq)
393{
394 struct iwl4965_queue *q = &txq->q;
395 struct pci_dev *dev = priv->pci_dev;
396 int len;
397
398 if (q->n_bd == 0)
399 return;
400
401 /* first, empty all BD's */
402 for (; q->write_ptr != q->read_ptr;
403 q->read_ptr = iwl4965_queue_inc_wrap(q->read_ptr, q->n_bd))
404 iwl4965_hw_txq_free_tfd(priv, txq);
405
406 len = sizeof(struct iwl4965_cmd) * q->n_window;
407 if (q->id == IWL_CMD_QUEUE_NUM)
408 len += IWL_MAX_SCAN_SIZE;
409
410 /* De-alloc array of command/tx buffers */
411 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
412
413 /* De-alloc circular buffer of TFDs */
414 if (txq->q.n_bd)
415 pci_free_consistent(dev, sizeof(struct iwl4965_tfd_frame) *
416 txq->q.n_bd, txq->bd, txq->q.dma_addr);
417
418 /* De-alloc array of per-TFD driver data */
419 if (txq->txb) {
420 kfree(txq->txb);
421 txq->txb = NULL;
422 }
423
424 /* 0-fill queue descriptor structure */
425 memset(txq, 0, sizeof(*txq));
426}
427
428const u8 iwl4965_broadcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
429
430/*************** STATION TABLE MANAGEMENT ****
431 * mac80211 should be examined to determine if sta_info is duplicating
432 * the functionality provided here
433 */
434
435/**************************************************************/
436
437#if 0 /* temporarily disabled until we add real remove-station support */
438/**
439 * iwl4965_remove_station - Remove driver's knowledge of station.
440 *
441 * NOTE: This does not remove station from device's station table.
442 */
443static u8 iwl4965_remove_station(struct iwl4965_priv *priv, const u8 *addr, int is_ap)
444{
445 int index = IWL_INVALID_STATION;
446 int i;
447 unsigned long flags;
448
449 spin_lock_irqsave(&priv->sta_lock, flags);
450
451 if (is_ap)
452 index = IWL_AP_ID;
453 else if (is_broadcast_ether_addr(addr))
454 index = priv->hw_setting.bcast_sta_id;
455 else
456 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++)
457 if (priv->stations[i].used &&
458 !compare_ether_addr(priv->stations[i].sta.sta.addr,
459 addr)) {
460 index = i;
461 break;
462 }
463
464 if (unlikely(index == IWL_INVALID_STATION))
465 goto out;
466
467 if (priv->stations[index].used) {
468 priv->stations[index].used = 0;
469 priv->num_stations--;
470 }
471
472 BUG_ON(priv->num_stations < 0);
473
474out:
475 spin_unlock_irqrestore(&priv->sta_lock, flags);
476 return 0;
477}
478#endif
479
480/**
481 * iwl4965_clear_stations_table - Clear the driver's station table
482 *
483 * NOTE: This does not clear or otherwise alter the device's station table.
484 */
485static void iwl4965_clear_stations_table(struct iwl4965_priv *priv)
486{
487 unsigned long flags;
488
489 spin_lock_irqsave(&priv->sta_lock, flags);
490
491 priv->num_stations = 0;
492 memset(priv->stations, 0, sizeof(priv->stations));
493
494 spin_unlock_irqrestore(&priv->sta_lock, flags);
495}
496
497/**
498 * iwl4965_add_station_flags - Add station to tables in driver and device
499 */
500u8 iwl4965_add_station_flags(struct iwl4965_priv *priv, const u8 *addr,
501 int is_ap, u8 flags, void *ht_data)
502{
503 int i;
504 int index = IWL_INVALID_STATION;
505 struct iwl4965_station_entry *station;
506 unsigned long flags_spin;
507 DECLARE_MAC_BUF(mac);
508
509 spin_lock_irqsave(&priv->sta_lock, flags_spin);
510 if (is_ap)
511 index = IWL_AP_ID;
512 else if (is_broadcast_ether_addr(addr))
513 index = priv->hw_setting.bcast_sta_id;
514 else
515 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) {
516 if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
517 addr)) {
518 index = i;
519 break;
520 }
521
522 if (!priv->stations[i].used &&
523 index == IWL_INVALID_STATION)
524 index = i;
525 }
526
527
528 /* These two conditions have the same outcome, but keep them separate
529 since they have different meanings */
530 if (unlikely(index == IWL_INVALID_STATION)) {
531 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
532 return index;
533 }
534
535 if (priv->stations[index].used &&
536 !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) {
537 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
538 return index;
539 }
540
541
542 IWL_DEBUG_ASSOC("Add STA ID %d: %s\n", index, print_mac(mac, addr));
543 station = &priv->stations[index];
544 station->used = 1;
545 priv->num_stations++;
546
547 /* Set up the REPLY_ADD_STA command to send to device */
548 memset(&station->sta, 0, sizeof(struct iwl4965_addsta_cmd));
549 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
550 station->sta.mode = 0;
551 station->sta.sta.sta_id = index;
552 station->sta.station_flags = 0;
553
554#ifdef CONFIG_IWL4965_HT
555 /* BCAST station and IBSS stations do not work in HT mode */
556 if (index != priv->hw_setting.bcast_sta_id &&
557 priv->iw_mode != IEEE80211_IF_TYPE_IBSS)
558 iwl4965_set_ht_add_station(priv, index,
559 (struct ieee80211_ht_info *) ht_data);
560#endif /*CONFIG_IWL4965_HT*/
561
562 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
563
564 /* Add station to device's station table */
565 iwl4965_send_add_station(priv, &station->sta, flags);
566 return index;
567
568}
569
570/*************** DRIVER STATUS FUNCTIONS *****/
571
572static inline int iwl4965_is_ready(struct iwl4965_priv *priv)
573{
574 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
575 * set but EXIT_PENDING is not */
576 return test_bit(STATUS_READY, &priv->status) &&
577 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
578 !test_bit(STATUS_EXIT_PENDING, &priv->status);
579}
580
581static inline int iwl4965_is_alive(struct iwl4965_priv *priv)
582{
583 return test_bit(STATUS_ALIVE, &priv->status);
584}
585
586static inline int iwl4965_is_init(struct iwl4965_priv *priv)
587{
588 return test_bit(STATUS_INIT, &priv->status);
589}
590
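/**
 * iwl4965_is_rfkill - true if either the HW or SW RF kill switch is active
 */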
591static inline int iwl4965_is_rfkill(struct iwl4965_priv *priv)
592{
593 return test_bit(STATUS_RF_KILL_HW, &priv->status) ||
594 test_bit(STATUS_RF_KILL_SW, &priv->status);
595}
596
597static inline int iwl4965_is_ready_rf(struct iwl4965_priv *priv)
598{
599
600 if (iwl4965_is_rfkill(priv))
601 return 0;
602
603 return iwl4965_is_ready(priv);
604}
605
606/*************** HOST COMMAND QUEUE FUNCTIONS *****/
607
608#define IWL_CMD(x) case x : return #x
609
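/**
 * get_cmd_string - map a host command ID to its name for debug/error messages
 */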
610static const char *get_cmd_string(u8 cmd)
611{
612 switch (cmd) {
613 IWL_CMD(REPLY_ALIVE);
614 IWL_CMD(REPLY_ERROR);
615 IWL_CMD(REPLY_RXON);
616 IWL_CMD(REPLY_RXON_ASSOC);
617 IWL_CMD(REPLY_QOS_PARAM);
618 IWL_CMD(REPLY_RXON_TIMING);
619 IWL_CMD(REPLY_ADD_STA);
620 IWL_CMD(REPLY_REMOVE_STA);
621 IWL_CMD(REPLY_REMOVE_ALL_STA);
622 IWL_CMD(REPLY_TX);
623 IWL_CMD(REPLY_RATE_SCALE);
624 IWL_CMD(REPLY_LEDS_CMD);
625 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
626 IWL_CMD(RADAR_NOTIFICATION);
627 IWL_CMD(REPLY_QUIET_CMD);
628 IWL_CMD(REPLY_CHANNEL_SWITCH);
629 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
630 IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
631 IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
632 IWL_CMD(POWER_TABLE_CMD);
633 IWL_CMD(PM_SLEEP_NOTIFICATION);
634 IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
635 IWL_CMD(REPLY_SCAN_CMD);
636 IWL_CMD(REPLY_SCAN_ABORT_CMD);
637 IWL_CMD(SCAN_START_NOTIFICATION);
638 IWL_CMD(SCAN_RESULTS_NOTIFICATION);
639 IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
640 IWL_CMD(BEACON_NOTIFICATION);
641 IWL_CMD(REPLY_TX_BEACON);
642 IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
643 IWL_CMD(QUIET_NOTIFICATION);
644 IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
645 IWL_CMD(MEASURE_ABORT_NOTIFICATION);
646 IWL_CMD(REPLY_BT_CONFIG);
647 IWL_CMD(REPLY_STATISTICS_CMD);
648 IWL_CMD(STATISTICS_NOTIFICATION);
649 IWL_CMD(REPLY_CARD_STATE_CMD);
650 IWL_CMD(CARD_STATE_NOTIFICATION);
651 IWL_CMD(MISSED_BEACONS_NOTIFICATION);
652 IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
653 IWL_CMD(SENSITIVITY_CMD);
654 IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
655 IWL_CMD(REPLY_RX_PHY_CMD);
656 IWL_CMD(REPLY_RX_MPDU_CMD);
657 IWL_CMD(REPLY_4965_RX);
658 IWL_CMD(REPLY_COMPRESSED_BA);
659 default:
660 return "UNKNOWN";
661
662 }
663}
664
665#define HOST_COMPLETE_TIMEOUT (HZ / 2)
666
667/**
668 * iwl4965_enqueue_hcmd - enqueue a uCode command
669 * @priv: device private data pointer
670 * @cmd: a pointer to the uCode command structure
671 *
672 * The function returns a value < 0 to indicate that the operation
673 * failed. On success, it returns the index (> 0) of the command in the
674 * command queue.
675 */
676static int iwl4965_enqueue_hcmd(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
677{
678 struct iwl4965_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
679 struct iwl4965_queue *q = &txq->q;
680 struct iwl4965_tfd_frame *tfd;
681 u32 *control_flags;
682 struct iwl4965_cmd *out_cmd;
683 u32 idx;
684 u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
685 dma_addr_t phys_addr;
686 int ret;
687 unsigned long flags;
688
689 /* If any of the command structures end up being larger than
690 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
691 * we will need to increase the size of the TFD entries */
692 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
693 !(cmd->meta.flags & CMD_SIZE_HUGE));
694
695 if (iwl4965_is_rfkill(priv)) {
696 IWL_DEBUG_INFO("Not sending command - RF KILL");
697 return -EIO;
698 }
699
700 if (iwl4965_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
701 IWL_ERROR("No space for Tx\n");
702 return -ENOSPC;
703 }
704
705 spin_lock_irqsave(&priv->hcmd_lock, flags);
706
707 tfd = &txq->bd[q->write_ptr];
708 memset(tfd, 0, sizeof(*tfd));
709
710 control_flags = (u32 *) tfd;
711
712 idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
713 out_cmd = &txq->cmd[idx];
714
715 out_cmd->hdr.cmd = cmd->id;
716 memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
717 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
718
719 /* At this point, the out_cmd now has all of the incoming cmd
720 * information */
721
722 out_cmd->hdr.flags = 0;
723 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
724 INDEX_TO_SEQ(q->write_ptr));
725 if (out_cmd->meta.flags & CMD_SIZE_HUGE)
726 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
727
728 phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
729 offsetof(struct iwl4965_cmd, hdr);
730 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
731
732 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
733 "%d bytes at %d[%d]:%d\n",
734 get_cmd_string(out_cmd->hdr.cmd),
735 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
736 fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
737
738 txq->need_update = 1;
739
740 /* Set up entry in queue's byte count circular buffer */
741 ret = iwl4965_tx_queue_update_wr_ptr(priv, txq, 0);
742
743 /* Increment and update queue's write index */
744 q->write_ptr = iwl4965_queue_inc_wrap(q->write_ptr, q->n_bd);
745 iwl4965_tx_queue_update_write_ptr(priv, txq);
746
747 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
748 return ret ? ret : idx;
749}
750
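/**
 * iwl4965_send_cmd_async - enqueue a host command without waiting for completion
 *
 * The command must carry a callback and must not request an SKB
 * (CMD_WANT_SKB).
 */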
751static int iwl4965_send_cmd_async(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
752{
753 int ret;
754
755 BUG_ON(!(cmd->meta.flags & CMD_ASYNC));
756
757 /* An asynchronous command can not expect an SKB to be set. */
758 BUG_ON(cmd->meta.flags & CMD_WANT_SKB);
759
760 /* An asynchronous command MUST have a callback. */
761 BUG_ON(!cmd->meta.u.callback);
762
763 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
764 return -EBUSY;
765
766 ret = iwl4965_enqueue_hcmd(priv, cmd);
767 if (ret < 0) {
768 IWL_ERROR("Error sending %s: iwl4965_enqueue_hcmd failed: %d\n",
769 get_cmd_string(cmd->id), ret);
770 return ret;
771 }
772 return 0;
773}
774
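/**
 * iwl4965_send_cmd_sync - enqueue a host command and wait for it to complete
 *
 * Only one synchronous command may be in flight at a time; waits up to
 * HOST_COMPLETE_TIMEOUT for the command response.
 */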
775static int iwl4965_send_cmd_sync(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
776{
777 int cmd_idx;
778 int ret;
779 static atomic_t entry = ATOMIC_INIT(0); /* reentrance protection */
780
781 BUG_ON(cmd->meta.flags & CMD_ASYNC);
782
783 /* A synchronous command can not have a callback set. */
784 BUG_ON(cmd->meta.u.callback != NULL);
785
786 if (atomic_xchg(&entry, 1)) {
787 IWL_ERROR("Error sending %s: Already sending a host command\n",
788 get_cmd_string(cmd->id));
789 return -EBUSY;
790 }
791
792 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
793
794 if (cmd->meta.flags & CMD_WANT_SKB)
795 cmd->meta.source = &cmd->meta;
796
797 cmd_idx = iwl4965_enqueue_hcmd(priv, cmd);
798 if (cmd_idx < 0) {
799 ret = cmd_idx;
800 IWL_ERROR("Error sending %s: iwl4965_enqueue_hcmd failed: %d\n",
801 get_cmd_string(cmd->id), ret);
802 goto out;
803 }
804
805 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
806 !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
807 HOST_COMPLETE_TIMEOUT);
808 if (!ret) {
809 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
810 IWL_ERROR("Error sending %s: time out after %dms.\n",
811 get_cmd_string(cmd->id),
812 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
813
814 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
815 ret = -ETIMEDOUT;
816 goto cancel;
817 }
818 }
819
820 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
821 IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n",
822 get_cmd_string(cmd->id));
823 ret = -ECANCELED;
824 goto fail;
825 }
826 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
827 IWL_DEBUG_INFO("Command %s failed: FW Error\n",
828 get_cmd_string(cmd->id));
829 ret = -EIO;
830 goto fail;
831 }
832 if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) {
833 IWL_ERROR("Error: Response NULL in '%s'\n",
834 get_cmd_string(cmd->id));
835 ret = -EIO;
836 goto out;
837 }
838
839 ret = 0;
840 goto out;
841
842cancel:
843 if (cmd->meta.flags & CMD_WANT_SKB) {
844 struct iwl4965_cmd *qcmd;
845
846 /* Cancel the CMD_WANT_SKB flag for the cmd in the
847 * TX cmd queue. Otherwise in case the cmd comes
848 * in later, it will possibly set an invalid
849 * address (cmd->meta.source). */
850 qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
851 qcmd->meta.flags &= ~CMD_WANT_SKB;
852 }
853fail:
854 if (cmd->meta.u.skb) {
855 dev_kfree_skb_any(cmd->meta.u.skb);
856 cmd->meta.u.skb = NULL;
857 }
858out:
859 atomic_set(&entry, 0);
860 return ret;
861}
862
863int iwl4965_send_cmd(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
864{
865 if (cmd->meta.flags & CMD_ASYNC)
866 return iwl4965_send_cmd_async(priv, cmd);
867
868 return iwl4965_send_cmd_sync(priv, cmd);
869}
870
871int iwl4965_send_cmd_pdu(struct iwl4965_priv *priv, u8 id, u16 len, const void *data)
872{
873 struct iwl4965_host_cmd cmd = {
874 .id = id,
875 .len = len,
876 .data = data,
877 };
878
879 return iwl4965_send_cmd_sync(priv, &cmd);
880}
881
882static int __must_check iwl4965_send_cmd_u32(struct iwl4965_priv *priv, u8 id, u32 val)
883{
884 struct iwl4965_host_cmd cmd = {
885 .id = id,
886 .len = sizeof(val),
887 .data = &val,
888 };
889
890 return iwl4965_send_cmd_sync(priv, &cmd);
891}
892
893int iwl4965_send_statistics_request(struct iwl4965_priv *priv)
894{
895 return iwl4965_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0);
896}
897
898/**
899 * iwl4965_rxon_add_station - add station into station table.
900 *
901 * there is only one AP station with id= IWL_AP_ID
902 * NOTE: mutex must be held before calling this function
903 */
904static int iwl4965_rxon_add_station(struct iwl4965_priv *priv,
905 const u8 *addr, int is_ap)
906{
907 u8 sta_id;
908
909 /* Add station to device's station table */
910#ifdef CONFIG_IWL4965_HT
911 struct ieee80211_conf *conf = &priv->hw->conf;
912 struct ieee80211_ht_info *cur_ht_config = &conf->ht_conf;
913
914 if ((is_ap) &&
915 (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) &&
916 (priv->iw_mode == IEEE80211_IF_TYPE_STA))
917 sta_id = iwl4965_add_station_flags(priv, addr, is_ap,
918 0, cur_ht_config);
919 else
920#endif /* CONFIG_IWL4965_HT */
921 sta_id = iwl4965_add_station_flags(priv, addr, is_ap,
922 0, NULL);
923
924 /* Set up default rate scaling table in device's station table */
925 iwl4965_add_station(priv, addr, is_ap);
926
927 return sta_id;
928}
929
930/**
931 * iwl4965_set_rxon_channel - Set the phymode and channel values in staging RXON
932 * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz
933 * @channel: Any channel valid for the requested phymode
934 *
935 * In addition to setting the staging RXON, priv->phymode is also set.
936 *
937 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
938 * in the staging RXON flag structure based on the phymode
939 */
940static int iwl4965_set_rxon_channel(struct iwl4965_priv *priv, u8 phymode,
941 u16 channel)
942{
943 if (!iwl4965_get_channel_info(priv, phymode, channel)) {
944 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
945 channel, phymode);
946 return -EINVAL;
947 }
948
949 if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
950 (priv->phymode == phymode))
951 return 0;
952
953 priv->staging_rxon.channel = cpu_to_le16(channel);
954 if (phymode == MODE_IEEE80211A)
955 priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
956 else
957 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
958
959 priv->phymode = phymode;
960
961 IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, phymode);
962
963 return 0;
964}
965
966/**
967 * iwl4965_check_rxon_cmd - check that the RXON structure is valid
968 *
969 * NOTE: This is really only useful during development and can eventually
970 * be #ifdef'd out once the driver is stable and folks aren't actively
971 * making changes
972 */
973static int iwl4965_check_rxon_cmd(struct iwl4965_rxon_cmd *rxon)
974{
975 int error = 0;
976 int counter = 1;
977
978 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
979 error |= le32_to_cpu(rxon->flags &
980 (RXON_FLG_TGJ_NARROW_BAND_MSK |
981 RXON_FLG_RADAR_DETECT_MSK));
982 if (error)
983 IWL_WARNING("check 24G fields %d | %d\n",
984 counter++, error);
985 } else {
986 error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
987 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
988 if (error)
989 IWL_WARNING("check 52 fields %d | %d\n",
990 counter++, error);
991 error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
992 if (error)
993 IWL_WARNING("check 52 CCK %d | %d\n",
994 counter++, error);
995 }
996 error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
997 if (error)
998 IWL_WARNING("check mac addr %d | %d\n", counter++, error);
999
1000 /* make sure basic rates 6Mbps and 1Mbps are supported */
1001 error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
1002 ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
1003 if (error)
1004 IWL_WARNING("check basic rate %d | %d\n", counter++, error);
1005
1006 error |= (le16_to_cpu(rxon->assoc_id) > 2007);
1007 if (error)
1008 IWL_WARNING("check assoc id %d | %d\n", counter++, error);
1009
1010 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
1011 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
1012 if (error)
1013 IWL_WARNING("check CCK and short slot %d | %d\n",
1014 counter++, error);
1015
1016 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
1017 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
1018 if (error)
1019 IWL_WARNING("check CCK & auto detect %d | %d\n",
1020 counter++, error);
1021
1022 error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
1023 RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
1024 if (error)
1025 IWL_WARNING("check TGG and auto detect %d | %d\n",
1026 counter++, error);
1027
1028 if (error)
1029 IWL_WARNING("Tuning to channel %d\n",
1030 le16_to_cpu(rxon->channel));
1031
1032 if (error) {
1033 IWL_ERROR("Invalid iwl4965_rxon_assoc_cmd field values\n");
1034 return -1;
1035 }
1036 return 0;
1037}
1038
1039/**
1040 * iwl4965_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
1041 * @priv: staging_rxon is compared to active_rxon
1042 *
1043 * If the RXON structure is changing enough to require a new tune,
1044 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
1045 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
1046 */
1047static int iwl4965_full_rxon_required(struct iwl4965_priv *priv)
1048{
1049
1050 /* These items are only settable from the full RXON command */
1051 if (!(priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ||
1052 compare_ether_addr(priv->staging_rxon.bssid_addr,
1053 priv->active_rxon.bssid_addr) ||
1054 compare_ether_addr(priv->staging_rxon.node_addr,
1055 priv->active_rxon.node_addr) ||
1056 compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
1057 priv->active_rxon.wlap_bssid_addr) ||
1058 (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
1059 (priv->staging_rxon.channel != priv->active_rxon.channel) ||
1060 (priv->staging_rxon.air_propagation !=
1061 priv->active_rxon.air_propagation) ||
1062 (priv->staging_rxon.ofdm_ht_single_stream_basic_rates !=
1063 priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
1064 (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
1065 priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
1066 (priv->staging_rxon.rx_chain != priv->active_rxon.rx_chain) ||
1067 (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
1068 return 1;
1069
1070 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
1071 * be updated with the RXON_ASSOC command -- however only some
1072 * flag transitions are allowed using RXON_ASSOC */
1073
1074 /* Check if we are not switching bands */
1075 if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
1076 (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
1077 return 1;
1078
1079 /* Check if we are switching association toggle */
1080 if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
1081 (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
1082 return 1;
1083
1084 return 0;
1085}
1086
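/**
 * iwl4965_send_rxon_assoc - send only the RXON_ASSOC subset of the RXON config
 *
 * Sends REPLY_RXON_ASSOC with the staging flags and basic rates; does
 * nothing if none of those fields differ from the active RXON.
 */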
1087static int iwl4965_send_rxon_assoc(struct iwl4965_priv *priv)
1088{
1089 int rc = 0;
1090 struct iwl4965_rx_packet *res = NULL;
1091 struct iwl4965_rxon_assoc_cmd rxon_assoc;
1092 struct iwl4965_host_cmd cmd = {
1093 .id = REPLY_RXON_ASSOC,
1094 .len = sizeof(rxon_assoc),
1095 .meta.flags = CMD_WANT_SKB,
1096 .data = &rxon_assoc,
1097 };
1098 const struct iwl4965_rxon_cmd *rxon1 = &priv->staging_rxon;
1099 const struct iwl4965_rxon_cmd *rxon2 = &priv->active_rxon;
1100
1101 if ((rxon1->flags == rxon2->flags) &&
1102 (rxon1->filter_flags == rxon2->filter_flags) &&
1103 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1104 (rxon1->ofdm_ht_single_stream_basic_rates ==
1105 rxon2->ofdm_ht_single_stream_basic_rates) &&
1106 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1107 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1108 (rxon1->rx_chain == rxon2->rx_chain) &&
1109 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1110 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
1111 return 0;
1112 }
1113
1114 rxon_assoc.flags = priv->staging_rxon.flags;
1115 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
1116 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
1117 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
1118 rxon_assoc.reserved = 0;
1119 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1120 priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
1121 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1122 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
1123 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
1124
1125 rc = iwl4965_send_cmd_sync(priv, &cmd);
1126 if (rc)
1127 return rc;
1128
1129 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
1130 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1131 IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n");
1132 rc = -EIO;
1133 }
1134
1135 priv->alloc_rxb_skb--;
1136 dev_kfree_skb_any(cmd.meta.u.skb);
1137
1138 return rc;
1139}
1140
1141/**
1142 * iwl4965_commit_rxon - commit staging_rxon to hardware
1143 *
1144 * The RXON command in staging_rxon is committed to the hardware and
1145 * the active_rxon structure is updated with the new data. This
1146 * function correctly transitions out of the RXON_ASSOC_MSK state if
1147 * a HW tune is required based on the RXON structure changes.
1148 */
1149static int iwl4965_commit_rxon(struct iwl4965_priv *priv)
1150{
1151 /* cast away the const for active_rxon in this function */
1152 struct iwl4965_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
1153 DECLARE_MAC_BUF(mac);
1154 int rc = 0;
1155
1156 if (!iwl4965_is_alive(priv))
1157 return -1;
1158
1159 /* always get timestamp with Rx frame */
1160 priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK;
1161
1162 rc = iwl4965_check_rxon_cmd(&priv->staging_rxon);
1163 if (rc) {
1164 IWL_ERROR("Invalid RXON configuration. Not committing.\n");
1165 return -EINVAL;
1166 }
1167
1168 /* If we don't need to send a full RXON, we can use
1169 * iwl4965_rxon_assoc_cmd which is used to reconfigure filter
1170 * and other flags for the current radio configuration. */
1171 if (!iwl4965_full_rxon_required(priv)) {
1172 rc = iwl4965_send_rxon_assoc(priv);
1173 if (rc) {
1174 IWL_ERROR("Error setting RXON_ASSOC "
1175 "configuration (%d).\n", rc);
1176 return rc;
1177 }
1178
1179 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
1180
1181 return 0;
1182 }
1183
1184 /* station table will be cleared */
1185 priv->assoc_station_added = 0;
1186
1187#ifdef CONFIG_IWL4965_SENSITIVITY
1188 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
1189 if (!priv->error_recovering)
1190 priv->start_calib = 0;
1191
1192 iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
1193#endif /* CONFIG_IWL4965_SENSITIVITY */
1194
1195 /* If we are currently associated and the new config requires
1196 * an RXON_ASSOC and the new config wants the associated mask enabled,
1197 * we must clear the associated from the active configuration
1198 * before we apply the new config */
1199 if (iwl4965_is_associated(priv) &&
1200 (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) {
1201 IWL_DEBUG_INFO("Toggling associated bit on current RXON\n");
1202 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1203
1204 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON,
1205 sizeof(struct iwl4965_rxon_cmd),
1206 &priv->active_rxon);
1207
1208 /* If the mask clearing failed then we set
1209 * active_rxon back to what it was previously */
1210 if (rc) {
1211 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1212 IWL_ERROR("Error clearing ASSOC_MSK on current "
1213 "configuration (%d).\n", rc);
1214 return rc;
1215 }
1216 }
1217
1218 IWL_DEBUG_INFO("Sending RXON\n"
1219 "* with%s RXON_FILTER_ASSOC_MSK\n"
1220 "* channel = %d\n"
1221 "* bssid = %s\n",
1222 ((priv->staging_rxon.filter_flags &
1223 RXON_FILTER_ASSOC_MSK) ? "" : "out"),
1224 le16_to_cpu(priv->staging_rxon.channel),
1225 print_mac(mac, priv->staging_rxon.bssid_addr));
1226
1227 /* Apply the new configuration */
1228 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON,
1229 sizeof(struct iwl4965_rxon_cmd), &priv->staging_rxon);
1230 if (rc) {
1231 IWL_ERROR("Error setting new configuration (%d).\n", rc);
1232 return rc;
1233 }
1234
1235 iwl4965_clear_stations_table(priv);
1236
1237#ifdef CONFIG_IWL4965_SENSITIVITY
1238 if (!priv->error_recovering)
1239 priv->start_calib = 0;
1240
1241 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
1242 iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
1243#endif /* CONFIG_IWL4965_SENSITIVITY */
1244
1245 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
1246
1247 /* If we issue a new RXON command which required a tune then we must
1248 * send a new TXPOWER command or we won't be able to Tx any frames */
1249 rc = iwl4965_hw_reg_send_txpower(priv);
1250 if (rc) {
1251 IWL_ERROR("Error setting Tx power (%d).\n", rc);
1252 return rc;
1253 }
1254
1255 /* Add the broadcast address so we can send broadcast frames */
1256 if (iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0) ==
1257 IWL_INVALID_STATION) {
1258 IWL_ERROR("Error adding BROADCAST address for transmit.\n");
1259 return -EIO;
1260 }
1261
1262 /* If we have set the ASSOC_MSK and we are in BSS mode then
1263 * add the IWL_AP_ID to the station rate table */
1264 if (iwl4965_is_associated(priv) &&
1265 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
1266 if (iwl4965_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1)
1267 == IWL_INVALID_STATION) {
1268 IWL_ERROR("Error adding AP address for transmit.\n");
1269 return -EIO;
1270 }
1271 priv->assoc_station_added = 1;
1272 }
1273
1274 return 0;
1275}
1276
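/**
 * iwl4965_send_bt_config - send REPLY_BT_CONFIG with the default Bluetooth
 * coexistence parameters
 */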
1277static int iwl4965_send_bt_config(struct iwl4965_priv *priv)
1278{
1279 struct iwl4965_bt_cmd bt_cmd = {
1280 .flags = 3,
1281 .lead_time = 0xAA,
1282 .max_kill = 1,
1283 .kill_ack_mask = 0,
1284 .kill_cts_mask = 0,
1285 };
1286
1287 return iwl4965_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1288 sizeof(struct iwl4965_bt_cmd), &bt_cmd);
1289}
1290
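/**
 * iwl4965_send_scan_abort - send REPLY_SCAN_ABORT_CMD, but only if a scan
 * is actually active in the hardware
 */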
1291static int iwl4965_send_scan_abort(struct iwl4965_priv *priv)
1292{
1293 int rc = 0;
1294 struct iwl4965_rx_packet *res;
1295 struct iwl4965_host_cmd cmd = {
1296 .id = REPLY_SCAN_ABORT_CMD,
1297 .meta.flags = CMD_WANT_SKB,
1298 };
1299
1300 /* If there isn't a scan actively going on in the hardware
1301 * then we are in between scan bands and not actually
1302 * actively scanning, so don't send the abort command */
1303 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
1304 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1305 return 0;
1306 }
1307
1308 rc = iwl4965_send_cmd_sync(priv, &cmd);
1309 if (rc) {
1310 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1311 return rc;
1312 }
1313
1314 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
1315 if (res->u.status != CAN_ABORT_STATUS) {
1316 /* The scan abort will return 1 for success or
1317 * 2 for "failure". A failure condition can be
1318 * due to simply not being in an active scan which
1319 * can occur if we send the scan abort before
1320 * the microcode has notified us that a scan is
1321 * completed. */
1322 IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status);
1323 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1324 clear_bit(STATUS_SCAN_HW, &priv->status);
1325 }
1326
1327 dev_kfree_skb_any(cmd.meta.u.skb);
1328
1329 return rc;
1330}
1331
1332static int iwl4965_card_state_sync_callback(struct iwl4965_priv *priv,
1333 struct iwl4965_cmd *cmd,
1334 struct sk_buff *skb)
1335{
1336 return 1;
1337}
1338
1339/*
1340 * CARD_STATE_CMD
1341 *
1342 * Use: Sets the device's internal card state to enable, disable, or halt
1343 *
1344 * When in the 'enable' state the card operates as normal.
1345 * When in the 'disable' state, the card enters into a low power mode.
1346 * When in the 'halt' state, the card is shut down and must be fully
1347 * restarted to come back on.
1348 */
1349static int iwl4965_send_card_state(struct iwl4965_priv *priv, u32 flags, u8 meta_flag)
1350{
1351 struct iwl4965_host_cmd cmd = {
1352 .id = REPLY_CARD_STATE_CMD,
1353 .len = sizeof(u32),
1354 .data = &flags,
1355 .meta.flags = meta_flag,
1356 };
1357
1358 if (meta_flag & CMD_ASYNC)
1359 cmd.meta.u.callback = iwl4965_card_state_sync_callback;
1360
1361 return iwl4965_send_cmd(priv, &cmd);
1362}
1363
1364static int iwl4965_add_sta_sync_callback(struct iwl4965_priv *priv,
1365 struct iwl4965_cmd *cmd, struct sk_buff *skb)
1366{
1367 struct iwl4965_rx_packet *res = NULL;
1368
1369 if (!skb) {
1370 IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
1371 return 1;
1372 }
1373
1374 res = (struct iwl4965_rx_packet *)skb->data;
1375 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1376 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1377 res->hdr.flags);
1378 return 1;
1379 }
1380
1381 switch (res->u.add_sta.status) {
1382 case ADD_STA_SUCCESS_MSK:
1383 break;
1384 default:
1385 break;
1386 }
1387
1388 /* We didn't cache the SKB; let the caller free it */
1389 return 1;
1390}
1391
1392int iwl4965_send_add_station(struct iwl4965_priv *priv,
1393 struct iwl4965_addsta_cmd *sta, u8 flags)
1394{
1395 struct iwl4965_rx_packet *res = NULL;
1396 int rc = 0;
1397 struct iwl4965_host_cmd cmd = {
1398 .id = REPLY_ADD_STA,
1399 .len = sizeof(struct iwl4965_addsta_cmd),
1400 .meta.flags = flags,
1401 .data = sta,
1402 };
1403
1404 if (flags & CMD_ASYNC)
1405 cmd.meta.u.callback = iwl4965_add_sta_sync_callback;
1406 else
1407 cmd.meta.flags |= CMD_WANT_SKB;
1408
1409 rc = iwl4965_send_cmd(priv, &cmd);
1410
1411 if (rc || (flags & CMD_ASYNC))
1412 return rc;
1413
1414 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
1415 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1416 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1417 res->hdr.flags);
1418 rc = -EIO;
1419 }
1420
1421 if (rc == 0) {
1422 switch (res->u.add_sta.status) {
1423 case ADD_STA_SUCCESS_MSK:
1424 IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
1425 break;
1426 default:
1427 rc = -EIO;
1428 IWL_WARNING("REPLY_ADD_STA failed\n");
1429 break;
1430 }
1431 }
1432
1433 priv->alloc_rxb_skb--;
1434 dev_kfree_skb_any(cmd.meta.u.skb);
1435
1436 return rc;
1437}
1438
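/**
 * iwl4965_update_sta_key_info - program a station's encryption key into uCode
 *
 * Only ALG_CCMP keys are handled here; TKIP and WEP return -EINVAL.
 */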
1439static int iwl4965_update_sta_key_info(struct iwl4965_priv *priv,
1440 struct ieee80211_key_conf *keyconf,
1441 u8 sta_id)
1442{
1443 unsigned long flags;
1444 __le16 key_flags = 0;
1445
1446 switch (keyconf->alg) {
1447 case ALG_CCMP:
1448 key_flags |= STA_KEY_FLG_CCMP;
1449 key_flags |= cpu_to_le16(
1450 keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
1451 key_flags &= ~STA_KEY_FLG_INVALID;
1452 break;
1453 case ALG_TKIP:
1454 case ALG_WEP:
1455 default:
1456 return -EINVAL;
1457 }
1458 spin_lock_irqsave(&priv->sta_lock, flags);
1459 priv->stations[sta_id].keyinfo.alg = keyconf->alg;
1460 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
1461 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
1462 keyconf->keylen);
1463
1464 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
1465 keyconf->keylen);
1466 priv->stations[sta_id].sta.key.key_flags = key_flags;
1467 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1468 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1469
1470 spin_unlock_irqrestore(&priv->sta_lock, flags);
1471
1472 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
1473 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1474 return 0;
1475}
1476
1477static int iwl4965_clear_sta_key_info(struct iwl4965_priv *priv, u8 sta_id)
1478{
1479 unsigned long flags;
1480
1481 spin_lock_irqsave(&priv->sta_lock, flags);
1482 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl4965_hw_key));
1483 memset(&priv->stations[sta_id].sta.key, 0, sizeof(struct iwl4965_keyinfo));
1484 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
1485 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1486 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1487 spin_unlock_irqrestore(&priv->sta_lock, flags);
1488
1489 IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
1490 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1491 return 0;
1492}
1493
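/**
 * iwl4965_clear_free_frames - release every frame on the pre-allocated free
 * list, warning if any frames are still outstanding
 */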
1494static void iwl4965_clear_free_frames(struct iwl4965_priv *priv)
1495{
1496 struct list_head *element;
1497
1498 IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n",
1499 priv->frames_count);
1500
1501 while (!list_empty(&priv->free_frames)) {
1502 element = priv->free_frames.next;
1503 list_del(element);
1504 kfree(list_entry(element, struct iwl4965_frame, list));
1505 priv->frames_count--;
1506 }
1507
1508 if (priv->frames_count) {
1509 IWL_WARNING("%d frames still in use. Did we lose one?\n",
1510 priv->frames_count);
1511 priv->frames_count = 0;
1512 }
1513}
1514
1515static struct iwl4965_frame *iwl4965_get_free_frame(struct iwl4965_priv *priv)
1516{
1517 struct iwl4965_frame *frame;
1518 struct list_head *element;
1519 if (list_empty(&priv->free_frames)) {
1520 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1521 if (!frame) {
1522 IWL_ERROR("Could not allocate frame!\n");
1523 return NULL;
1524 }
1525
1526 priv->frames_count++;
1527 return frame;
1528 }
1529
1530 element = priv->free_frames.next;
1531 list_del(element);
1532 return list_entry(element, struct iwl4965_frame, list);
1533}
1534
1535static void iwl4965_free_frame(struct iwl4965_priv *priv, struct iwl4965_frame *frame)
1536{
1537 memset(frame, 0, sizeof(*frame));
1538 list_add(&frame->list, &priv->free_frames);
1539}
1540
1541unsigned int iwl4965_fill_beacon_frame(struct iwl4965_priv *priv,
1542 struct ieee80211_hdr *hdr,
1543 const u8 *dest, int left)
1544{
1545
1546 if (!iwl4965_is_associated(priv) || !priv->ibss_beacon ||
1547 ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) &&
1548 (priv->iw_mode != IEEE80211_IF_TYPE_AP)))
1549 return 0;
1550
1551 if (priv->ibss_beacon->len > left)
1552 return 0;
1553
1554 memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);
1555
1556 return priv->ibss_beacon->len;
1557}
1558
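/**
 * iwl4965_rate_index_from_plcp - convert a PLCP/MCS rate value from the uCode
 * into a driver rate-table index, or -1 if no match is found
 */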
1559int iwl4965_rate_index_from_plcp(int plcp)
1560{
1561 int i = 0;
1562
1563 /* 4965 HT rate format */
1564 if (plcp & RATE_MCS_HT_MSK) {
1565 i = (plcp & 0xff);
1566
1567 if (i >= IWL_RATE_MIMO_6M_PLCP)
1568 i = i - IWL_RATE_MIMO_6M_PLCP;
1569
1570 i += IWL_FIRST_OFDM_RATE;
1571 /* skip 9M not supported in ht*/
1572 if (i >= IWL_RATE_9M_INDEX)
1573 i += 1;
1574 if ((i >= IWL_FIRST_OFDM_RATE) &&
1575 (i <= IWL_LAST_OFDM_RATE))
1576 return i;
1577
1578 /* 4965 legacy rate format, search for match in table */
1579 } else {
1580 for (i = 0; i < ARRAY_SIZE(iwl4965_rates); i++)
1581 if (iwl4965_rates[i].plcp == (plcp & 0xFF))
1582 return i;
1583 }
1584 return -1;
1585}
1586
1587static u8 iwl4965_rate_get_lowest_plcp(int rate_mask)
1588{
1589 u8 i;
1590
1591 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
1592 i = iwl4965_rates[i].next_ieee) {
1593 if (rate_mask & (1 << i))
1594 return iwl4965_rates[i].plcp;
1595 }
1596
1597 return IWL_RATE_INVALID;
1598}
1599
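/**
 * iwl4965_send_beacon_cmd - build a beacon frame at the lowest basic rate for
 * the current band and send it with REPLY_TX_BEACON
 */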
1600static int iwl4965_send_beacon_cmd(struct iwl4965_priv *priv)
1601{
1602 struct iwl4965_frame *frame;
1603 unsigned int frame_size;
1604 int rc;
1605 u8 rate;
1606
1607 frame = iwl4965_get_free_frame(priv);
1608
1609 if (!frame) {
1610 IWL_ERROR("Could not obtain free frame buffer for beacon "
1611 "command.\n");
1612 return -ENOMEM;
1613 }
1614
1615 if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) {
1616 rate = iwl4965_rate_get_lowest_plcp(priv->active_rate_basic &
1617 0xFF0);
1618 if (rate == IWL_INVALID_RATE)
1619 rate = IWL_RATE_6M_PLCP;
1620 } else {
1621 rate = iwl4965_rate_get_lowest_plcp(priv->active_rate_basic & 0xF);
1622 if (rate == IWL_INVALID_RATE)
1623 rate = IWL_RATE_1M_PLCP;
1624 }
1625
1626 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame, rate);
1627
1628 rc = iwl4965_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
1629 &frame->u.cmd[0]);
1630
1631 iwl4965_free_frame(priv, frame);
1632
1633 return rc;
1634}
1635
1636/******************************************************************************
1637 *
1638 * EEPROM related functions
1639 *
1640 ******************************************************************************/
1641
1642static void get_eeprom_mac(struct iwl4965_priv *priv, u8 *mac)
1643{
1644 memcpy(mac, priv->eeprom.mac_address, 6);
1645}
1646
1647static inline void iwl4965_eeprom_release_semaphore(struct iwl4965_priv *priv)
1648{
1649 iwl4965_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
1650 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
1651}
1652
1653/**
1654 * iwl4965_eeprom_init - read EEPROM contents
1655 *
1656 * Load the EEPROM contents from adapter into priv->eeprom
1657 *
1658 * NOTE: This routine uses the non-debug IO access functions.
1659 */
1660int iwl4965_eeprom_init(struct iwl4965_priv *priv)
1661{
1662 __le16 *e = (__le16 *)&priv->eeprom;
1663 u32 gp = iwl4965_read32(priv, CSR_EEPROM_GP);
1664 u32 r;
1665 int sz = sizeof(priv->eeprom);
1666 int rc;
1667 int i;
1668 u16 addr;
1669
1670 /* The EEPROM structure has several padding buffers within it
1671 * and when adding new EEPROM maps is subject to programmer errors
1672 * which may be very difficult to identify without explicitly
1673 * checking the resulting size of the eeprom map. */
1674 BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE);
1675
1676 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
1677 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp);
1678 return -ENOENT;
1679 }
1680
6440adb5 1681 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
bb8c093b 1682 rc = iwl4965_eeprom_acquire_semaphore(priv);
b481de9c 1683 if (rc < 0) {
91e17473 1684 IWL_ERROR("Failed to acquire EEPROM semaphore.\n");
b481de9c
ZY
1685 return -ENOENT;
1686 }
1687
1688 /* eeprom is an array of 16bit values */
1689 for (addr = 0; addr < sz; addr += sizeof(u16)) {
bb8c093b
CH
1690 _iwl4965_write32(priv, CSR_EEPROM_REG, addr << 1);
1691 _iwl4965_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
b481de9c
ZY
1692
1693 for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT;
1694 i += IWL_EEPROM_ACCESS_DELAY) {
bb8c093b 1695 r = _iwl4965_read_direct32(priv, CSR_EEPROM_REG);
b481de9c
ZY
1696 if (r & CSR_EEPROM_REG_READ_VALID_MSK)
1697 break;
1698 udelay(IWL_EEPROM_ACCESS_DELAY);
1699 }
1700
1701 if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) {
1702 IWL_ERROR("Time out reading EEPROM[%d]", addr);
1703 rc = -ETIMEDOUT;
1704 goto done;
1705 }
0e5ce1f3 1706 e[addr / 2] = cpu_to_le16(r >> 16);
b481de9c
ZY
1707 }
1708 rc = 0;
1709
1710done:
bb8c093b 1711 iwl4965_eeprom_release_semaphore(priv);
b481de9c
ZY
1712 return rc;
1713}
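/*
 * Illustration of a single iteration of the read loop above, with a
 * hypothetical offset (not a definitive description of the CSR layout):
 *
 *   addr = 0x26                                   (increments by sizeof(u16))
 *   CSR_EEPROM_REG <- addr << 1                   kick off the word read
 *   poll CSR_EEPROM_REG until READ_VALID is set   (IWL_EEPROM_ACCESS_TIMEOUT)
 *   e[addr / 2] = cpu_to_le16(r >> 16)            data arrives in the upper
 *                                                 16 bits of the register
 */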
1714
1715/******************************************************************************
1716 *
1717 * Misc. internal state and helper functions
1718 *
1719 ******************************************************************************/
c8b0e6e1 1720#ifdef CONFIG_IWL4965_DEBUG
b481de9c
ZY
1721
1722/**
bb8c093b 1723 * iwl4965_report_frame - dump frame to syslog during debug sessions
b481de9c 1724 *
9fbab516 1725 * You may hack this function to show different aspects of received frames,
b481de9c
ZY
1726 * including selective frame dumps.
1727 * group100 parameter selects whether to show 1 out of 100 good frames.
1728 *
9fbab516
BC
1729 * TODO: This was originally written for 3945, need to audit for
1730 * proper operation with 4965.
b481de9c 1731 */
bb8c093b
CH
1732void iwl4965_report_frame(struct iwl4965_priv *priv,
1733 struct iwl4965_rx_packet *pkt,
b481de9c
ZY
1734 struct ieee80211_hdr *header, int group100)
1735{
1736 u32 to_us;
1737 u32 print_summary = 0;
1738 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
1739 u32 hundred = 0;
1740 u32 dataframe = 0;
1741 u16 fc;
1742 u16 seq_ctl;
1743 u16 channel;
1744 u16 phy_flags;
1745 int rate_sym;
1746 u16 length;
1747 u16 status;
1748 u16 bcn_tmr;
1749 u32 tsf_low;
1750 u64 tsf;
1751 u8 rssi;
1752 u8 agc;
1753 u16 sig_avg;
1754 u16 noise_diff;
bb8c093b
CH
1755 struct iwl4965_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
1756 struct iwl4965_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
1757 struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt);
b481de9c
ZY
1758 u8 *data = IWL_RX_DATA(pkt);
1759
1760 /* MAC header */
1761 fc = le16_to_cpu(header->frame_control);
1762 seq_ctl = le16_to_cpu(header->seq_ctrl);
1763
1764 /* metadata */
1765 channel = le16_to_cpu(rx_hdr->channel);
1766 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
1767 rate_sym = rx_hdr->rate;
1768 length = le16_to_cpu(rx_hdr->len);
1769
1770 /* end-of-frame status and timestamp */
1771 status = le32_to_cpu(rx_end->status);
1772 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
1773 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
1774 tsf = le64_to_cpu(rx_end->timestamp);
1775
1776 /* signal statistics */
1777 rssi = rx_stats->rssi;
1778 agc = rx_stats->agc;
1779 sig_avg = le16_to_cpu(rx_stats->sig_avg);
1780 noise_diff = le16_to_cpu(rx_stats->noise_diff);
1781
1782 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
1783
1784 /* if data frame is to us and all is good,
1785 * (optionally) print summary for only 1 out of every 100 */
1786 if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
1787 (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
1788 dataframe = 1;
1789 if (!group100)
1790 print_summary = 1; /* print each frame */
1791 else if (priv->framecnt_to_us < 100) {
1792 priv->framecnt_to_us++;
1793 print_summary = 0;
1794 } else {
1795 priv->framecnt_to_us = 0;
1796 print_summary = 1;
1797 hundred = 1;
1798 }
1799 } else {
1800 /* print summary for all other frames */
1801 print_summary = 1;
1802 }
1803
1804 if (print_summary) {
1805 char *title;
1806 u32 rate;
1807
1808 if (hundred)
1809 title = "100Frames";
1810 else if (fc & IEEE80211_FCTL_RETRY)
1811 title = "Retry";
1812 else if (ieee80211_is_assoc_response(fc))
1813 title = "AscRsp";
1814 else if (ieee80211_is_reassoc_response(fc))
1815 title = "RasRsp";
1816 else if (ieee80211_is_probe_response(fc)) {
1817 title = "PrbRsp";
1818 print_dump = 1; /* dump frame contents */
1819 } else if (ieee80211_is_beacon(fc)) {
1820 title = "Beacon";
1821 print_dump = 1; /* dump frame contents */
1822 } else if (ieee80211_is_atim(fc))
1823 title = "ATIM";
1824 else if (ieee80211_is_auth(fc))
1825 title = "Auth";
1826 else if (ieee80211_is_deauth(fc))
1827 title = "DeAuth";
1828 else if (ieee80211_is_disassoc(fc))
1829 title = "DisAssoc";
1830 else
1831 title = "Frame";
1832
bb8c093b 1833 rate = iwl4965_rate_index_from_plcp(rate_sym);
b481de9c
ZY
1834 if (rate == -1)
1835 rate = 0;
1836 else
bb8c093b 1837 rate = iwl4965_rates[rate].ieee / 2;
b481de9c
ZY
1838
1839 /* print frame summary.
1840 * MAC addresses show just the last byte (for brevity),
1841 * but you can hack it to show more, if you'd like to. */
1842 if (dataframe)
1843 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
1844 "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
1845 title, fc, header->addr1[5],
1846 length, rssi, channel, rate);
1847 else {
1848 /* src/dst addresses assume managed mode */
1849 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
1850 "src=0x%02x, rssi=%u, tim=%lu usec, "
1851 "phy=0x%02x, chnl=%d\n",
1852 title, fc, header->addr1[5],
1853 header->addr3[5], rssi,
1854 tsf_low - priv->scan_start_tsf,
1855 phy_flags, channel);
1856 }
1857 }
1858 if (print_dump)
bb8c093b 1859 iwl4965_print_hex_dump(IWL_DL_RX, data, length);
b481de9c
ZY
1860}
1861#endif
1862
bb8c093b 1863static void iwl4965_unset_hw_setting(struct iwl4965_priv *priv)
b481de9c
ZY
1864{
1865 if (priv->hw_setting.shared_virt)
1866 pci_free_consistent(priv->pci_dev,
bb8c093b 1867 sizeof(struct iwl4965_shared),
b481de9c
ZY
1868 priv->hw_setting.shared_virt,
1869 priv->hw_setting.shared_phys);
1870}
1871
1872/**
bb8c093b 1873 * iwl4965_supported_rate_to_ie - fill in the supported rate in IE field
b481de9c
ZY
1874 *
1875 * return: bitmask with a bit set for each supported rate inserted into the IE
1876 */
bb8c093b 1877static u16 iwl4965_supported_rate_to_ie(u8 *ie, u16 supported_rate,
c7c46676 1878 u16 basic_rate, int *left)
b481de9c
ZY
1879{
1880 u16 ret_rates = 0, bit;
1881 int i;
c7c46676
TW
1882 u8 *cnt = ie;
1883 u8 *rates = ie + 1;
b481de9c
ZY
1884
1885 for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
1886 if (bit & supported_rate) {
1887 ret_rates |= bit;
bb8c093b 1888 rates[*cnt] = iwl4965_rates[i].ieee |
c7c46676
TW
1889 ((bit & basic_rate) ? 0x80 : 0x00);
1890 (*cnt)++;
1891 (*left)--;
1892 if ((*left <= 0) ||
1893 (*cnt >= IWL_SUPPORTED_RATES_IE_LEN))
b481de9c
ZY
1894 break;
1895 }
1896 }
1897
1898 return ret_rates;
1899}
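/*
 * Example of the resulting Supported Rates IE body (illustrative only):
 * ie[0] holds the rate count and each following byte is a rate in 500 kbps
 * units, with 0x80 OR'd in for basic rates. E.g. a basic 11 Mbps CCK rate
 * is written as 0x96 (22 | 0x80), while a non-basic 6 Mbps OFDM rate is
 * written as 0x0c (12).
 */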
1900
c8b0e6e1 1901#ifdef CONFIG_IWL4965_HT
bb8c093b 1902void static iwl4965_set_ht_capab(struct ieee80211_hw *hw,
8fb88032
RR
1903 struct ieee80211_ht_cap *ht_cap,
1904 u8 use_current_config);
b481de9c
ZY
1905#endif
1906
1907/**
bb8c093b 1908 * iwl4965_fill_probe_req - fill in all required fields and IE for probe request
b481de9c 1909 */
bb8c093b 1910static u16 iwl4965_fill_probe_req(struct iwl4965_priv *priv,
b481de9c
ZY
1911 struct ieee80211_mgmt *frame,
1912 int left, int is_direct)
1913{
1914 int len = 0;
1915 u8 *pos = NULL;
bee488db 1916 u16 active_rates, ret_rates, cck_rates, active_rate_basic;
8fb88032
RR
1917#ifdef CONFIG_IWL4965_HT
1918 struct ieee80211_hw_mode *mode;
1919#endif /* CONFIG_IWL4965_HT */
b481de9c
ZY
1920
1921 /* Make sure there is enough space for the probe request,
1922 * two mandatory IEs and the data */
1923 left -= 24;
1924 if (left < 0)
1925 return 0;
1926 len += 24;
1927
1928 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
bb8c093b 1929 memcpy(frame->da, iwl4965_broadcast_addr, ETH_ALEN);
b481de9c 1930 memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
bb8c093b 1931 memcpy(frame->bssid, iwl4965_broadcast_addr, ETH_ALEN);
b481de9c
ZY
1932 frame->seq_ctrl = 0;
1933
1934 /* fill in our indirect SSID IE */
1935 /* ...next IE... */
1936
1937 left -= 2;
1938 if (left < 0)
1939 return 0;
1940 len += 2;
1941 pos = &(frame->u.probe_req.variable[0]);
1942 *pos++ = WLAN_EID_SSID;
1943 *pos++ = 0;
1944
1945 /* fill in our direct SSID IE... */
1946 if (is_direct) {
1947 /* ...next IE... */
1948 left -= 2 + priv->essid_len;
1949 if (left < 0)
1950 return 0;
1951 /* ... fill it in... */
1952 *pos++ = WLAN_EID_SSID;
1953 *pos++ = priv->essid_len;
1954 memcpy(pos, priv->essid, priv->essid_len);
1955 pos += priv->essid_len;
1956 len += 2 + priv->essid_len;
1957 }
1958
1959 /* fill in supported rate */
1960 /* ...next IE... */
1961 left -= 2;
1962 if (left < 0)
1963 return 0;
c7c46676 1964
b481de9c
ZY
1965 /* ... fill it in... */
1966 *pos++ = WLAN_EID_SUPP_RATES;
1967 *pos = 0;
c7c46676 1968
bee488db 1969 /* exclude 60M rate */
1970 active_rates = priv->rates_mask;
1971 active_rates &= ~IWL_RATE_60M_MASK;
1972
1973 active_rate_basic = active_rates & IWL_BASIC_RATES_MASK;
b481de9c 1974
c7c46676 1975 cck_rates = IWL_CCK_RATES_MASK & active_rates;
bb8c093b 1976 ret_rates = iwl4965_supported_rate_to_ie(pos, cck_rates,
bee488db 1977 active_rate_basic, &left);
c7c46676
TW
1978 active_rates &= ~ret_rates;
1979
bb8c093b 1980 ret_rates = iwl4965_supported_rate_to_ie(pos, active_rates,
bee488db 1981 active_rate_basic, &left);
c7c46676
TW
1982 active_rates &= ~ret_rates;
1983
b481de9c
ZY
1984 len += 2 + *pos;
1985 pos += (*pos) + 1;
c7c46676 1986 if (active_rates == 0)
b481de9c
ZY
1987 goto fill_end;
1988
1989 /* fill in supported extended rate */
1990 /* ...next IE... */
1991 left -= 2;
1992 if (left < 0)
1993 return 0;
1994 /* ... fill it in... */
1995 *pos++ = WLAN_EID_EXT_SUPP_RATES;
1996 *pos = 0;
bb8c093b 1997 iwl4965_supported_rate_to_ie(pos, active_rates,
bee488db 1998 active_rate_basic, &left);
b481de9c
ZY
1999 if (*pos > 0)
2000 len += 2 + *pos;
2001
c8b0e6e1 2002#ifdef CONFIG_IWL4965_HT
8fb88032
RR
2003 mode = priv->hw->conf.mode;
2004 if (mode->ht_info.ht_supported) {
b481de9c
ZY
2005 pos += (*pos) + 1;
2006 *pos++ = WLAN_EID_HT_CAPABILITY;
8fb88032
RR
2007 *pos++ = sizeof(struct ieee80211_ht_cap);
2008 iwl4965_set_ht_capab(priv->hw,
2009 (struct ieee80211_ht_cap *)pos, 0);
2010 len += 2 + sizeof(struct ieee80211_ht_cap);
b481de9c 2011 }
c8b0e6e1 2012#endif /*CONFIG_IWL4965_HT */
b481de9c
ZY
2013
2014 fill_end:
2015 return (u16)len;
2016}
2017
2018/*
2019 * QoS support
2020 */
c8b0e6e1 2021#ifdef CONFIG_IWL4965_QOS
bb8c093b
CH
2022static int iwl4965_send_qos_params_command(struct iwl4965_priv *priv,
2023 struct iwl4965_qosparam_cmd *qos)
b481de9c
ZY
2024{
2025
bb8c093b
CH
2026 return iwl4965_send_cmd_pdu(priv, REPLY_QOS_PARAM,
2027 sizeof(struct iwl4965_qosparam_cmd), qos);
b481de9c
ZY
2028}
2029
bb8c093b 2030static void iwl4965_reset_qos(struct iwl4965_priv *priv)
b481de9c
ZY
2031{
2032 u16 cw_min = 15;
2033 u16 cw_max = 1023;
2034 u8 aifs = 2;
2035 u8 is_legacy = 0;
2036 unsigned long flags;
2037 int i;
2038
2039 spin_lock_irqsave(&priv->lock, flags);
2040 priv->qos_data.qos_active = 0;
2041
2042 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) {
2043 if (priv->qos_data.qos_enable)
2044 priv->qos_data.qos_active = 1;
2045 if (!(priv->active_rate & 0xfff0)) {
2046 cw_min = 31;
2047 is_legacy = 1;
2048 }
2049 } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2050 if (priv->qos_data.qos_enable)
2051 priv->qos_data.qos_active = 1;
2052 } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) {
2053 cw_min = 31;
2054 is_legacy = 1;
2055 }
2056
2057 if (priv->qos_data.qos_active)
2058 aifs = 3;
2059
2060 priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
2061 priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
2062 priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
2063 priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
2064 priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
2065
2066 if (priv->qos_data.qos_active) {
2067 i = 1;
2068 priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
2069 priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
2070 priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
2071 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
2072 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2073
2074 i = 2;
2075 priv->qos_data.def_qos_parm.ac[i].cw_min =
2076 cpu_to_le16((cw_min + 1) / 2 - 1);
2077 priv->qos_data.def_qos_parm.ac[i].cw_max =
2078 cpu_to_le16(cw_max);
2079 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
2080 if (is_legacy)
2081 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2082 cpu_to_le16(6016);
2083 else
2084 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2085 cpu_to_le16(3008);
2086 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2087
2088 i = 3;
2089 priv->qos_data.def_qos_parm.ac[i].cw_min =
2090 cpu_to_le16((cw_min + 1) / 4 - 1);
2091 priv->qos_data.def_qos_parm.ac[i].cw_max =
2092 cpu_to_le16((cw_max + 1) / 2 - 1);
2093 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
2094 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2095 if (is_legacy)
2096 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2097 cpu_to_le16(3264);
2098 else
2099 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2100 cpu_to_le16(1504);
2101 } else {
2102 for (i = 1; i < 4; i++) {
2103 priv->qos_data.def_qos_parm.ac[i].cw_min =
2104 cpu_to_le16(cw_min);
2105 priv->qos_data.def_qos_parm.ac[i].cw_max =
2106 cpu_to_le16(cw_max);
2107 priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
2108 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
2109 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2110 }
2111 }
2112 IWL_DEBUG_QOS("set QoS to default\n");
2113
2114 spin_unlock_irqrestore(&priv->lock, flags);
2115}
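/*
 * Worked example of the derived EDCA values above, assuming the non-legacy
 * case with cw_min = 15 and cw_max = 1023 (purely illustrative):
 *   AC[0]/AC[1]: cw_min = 15,              cw_max = 1023, aifsn = 3 / 7
 *   AC[2]:       cw_min = (15+1)/2-1 = 7,  cw_max = 1023, txop = 3008
 *   AC[3]:       cw_min = (15+1)/4-1 = 3,  cw_max = (1023+1)/2-1 = 511,
 *                txop = 1504
 */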
2116
bb8c093b 2117static void iwl4965_activate_qos(struct iwl4965_priv *priv, u8 force)
b481de9c
ZY
2118{
2119 unsigned long flags;
2120
b481de9c
ZY
2121 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2122 return;
2123
2124 if (!priv->qos_data.qos_enable)
2125 return;
2126
2127 spin_lock_irqsave(&priv->lock, flags);
2128 priv->qos_data.def_qos_parm.qos_flags = 0;
2129
2130 if (priv->qos_data.qos_cap.q_AP.queue_request &&
2131 !priv->qos_data.qos_cap.q_AP.txop_request)
2132 priv->qos_data.def_qos_parm.qos_flags |=
2133 QOS_PARAM_FLG_TXOP_TYPE_MSK;
b481de9c
ZY
2134 if (priv->qos_data.qos_active)
2135 priv->qos_data.def_qos_parm.qos_flags |=
2136 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
2137
c8b0e6e1 2138#ifdef CONFIG_IWL4965_HT
fd105e79 2139 if (priv->current_ht_config.is_ht)
f1f1f5c7 2140 priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
c8b0e6e1 2141#endif /* CONFIG_IWL4965_HT */
f1f1f5c7 2142
b481de9c
ZY
2143 spin_unlock_irqrestore(&priv->lock, flags);
2144
bb8c093b 2145 if (force || iwl4965_is_associated(priv)) {
f1f1f5c7
TW
2146 IWL_DEBUG_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
2147 priv->qos_data.qos_active,
2148 priv->qos_data.def_qos_parm.qos_flags);
b481de9c 2149
bb8c093b 2150 iwl4965_send_qos_params_command(priv,
b481de9c
ZY
2151 &(priv->qos_data.def_qos_parm));
2152 }
2153}
2154
c8b0e6e1 2155#endif /* CONFIG_IWL4965_QOS */
b481de9c
ZY
2156/*
2157 * Power management (not Tx power!) functions
2158 */
2159#define MSEC_TO_USEC 1024
2160
2161#define NOSLP __constant_cpu_to_le16(0), 0, 0
2162#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
2163#define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
2164#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
2165 __constant_cpu_to_le32(X1), \
2166 __constant_cpu_to_le32(X2), \
2167 __constant_cpu_to_le32(X3), \
2168 __constant_cpu_to_le32(X4)}
2169
2170
2171/* default power management (not Tx power) table values */
2172/* for tim 0-10 */
bb8c093b 2173static struct iwl4965_power_vec_entry range_0[IWL_POWER_AC] = {
b481de9c
ZY
2174 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
2175 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
2176 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
2177 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
2178 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
2179 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
2180};
2181
2182/* for tim > 10 */
bb8c093b 2183static struct iwl4965_power_vec_entry range_1[IWL_POWER_AC] = {
b481de9c
ZY
2184 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
2185 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500),
2186 SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
2187 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300),
2188 SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
2189 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100),
2190 SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
2191 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
2192 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25),
2193 SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
2194};
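/*
 * Each entry above pairs an iwl4965_powertable_cmd with a no_dtim flag; the
 * six entries are indexed by power level (CAM through IWL_POWER_INDEX_5).
 * The SLP_VEC values look like sleep intervals counted in beacon/DTIM
 * periods (an interpretation of this code, not documented here); overly
 * large entries such as 0xFF are later clamped to max_sleep by
 * iwl4965_update_power_cmd().
 */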
2195
bb8c093b 2196int iwl4965_power_init_handle(struct iwl4965_priv *priv)
b481de9c
ZY
2197{
2198 int rc = 0, i;
bb8c093b
CH
2199 struct iwl4965_power_mgr *pow_data;
2200 int size = sizeof(struct iwl4965_power_vec_entry) * IWL_POWER_AC;
b481de9c
ZY
2201 u16 pci_pm;
2202
2203 IWL_DEBUG_POWER("Initialize power \n");
2204
2205 pow_data = &(priv->power_data);
2206
2207 memset(pow_data, 0, sizeof(*pow_data));
2208
2209 pow_data->active_index = IWL_POWER_RANGE_0;
2210 pow_data->dtim_val = 0xffff;
2211
2212 memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
2213 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
2214
2215 rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
2216 if (rc != 0)
2217 return 0;
2218 else {
bb8c093b 2219 struct iwl4965_powertable_cmd *cmd;
b481de9c
ZY
2220
2221 IWL_DEBUG_POWER("adjust power command flags\n");
2222
2223 for (i = 0; i < IWL_POWER_AC; i++) {
2224 cmd = &pow_data->pwr_range_0[i].cmd;
2225
2226 if (pci_pm & 0x1)
2227 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
2228 else
2229 cmd->flags |= IWL_POWER_PCI_PM_MSK;
2230 }
2231 }
2232 return rc;
2233}
2234
bb8c093b
CH
2235static int iwl4965_update_power_cmd(struct iwl4965_priv *priv,
2236 struct iwl4965_powertable_cmd *cmd, u32 mode)
b481de9c
ZY
2237{
2238 int rc = 0, i;
2239 u8 skip;
2240 u32 max_sleep = 0;
bb8c093b 2241 struct iwl4965_power_vec_entry *range;
b481de9c 2242 u8 period = 0;
bb8c093b 2243 struct iwl4965_power_mgr *pow_data;
b481de9c
ZY
2244
2245 if (mode > IWL_POWER_INDEX_5) {
2246 IWL_DEBUG_POWER("Error invalid power mode \n");
2247 return -1;
2248 }
2249 pow_data = &(priv->power_data);
2250
2251 if (pow_data->active_index == IWL_POWER_RANGE_0)
2252 range = &pow_data->pwr_range_0[0];
2253 else
2254 range = &pow_data->pwr_range_1[1];
2255
bb8c093b 2256 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl4965_powertable_cmd));
b481de9c
ZY
2257
2258#ifdef IWL_MAC80211_DISABLE
2259 if (priv->assoc_network != NULL) {
2260 unsigned long flags;
2261
2262 period = priv->assoc_network->tim.tim_period;
2263 }
2264#endif /*IWL_MAC80211_DISABLE */
2265 skip = range[mode].no_dtim;
2266
2267 if (period == 0) {
2268 period = 1;
2269 skip = 0;
2270 }
2271
2272 if (skip == 0) {
2273 max_sleep = period;
2274 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
2275 } else {
2276 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
2277 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
2278 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
2279 }
2280
2281 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
2282 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
2283 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
2284 }
2285
2286 IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
2287 IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
2288 IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
2289 IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
2290 le32_to_cpu(cmd->sleep_interval[0]),
2291 le32_to_cpu(cmd->sleep_interval[1]),
2292 le32_to_cpu(cmd->sleep_interval[2]),
2293 le32_to_cpu(cmd->sleep_interval[3]),
2294 le32_to_cpu(cmd->sleep_interval[4]));
2295
2296 return rc;
2297}
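/*
 * Illustration of the max_sleep clamp above (hypothetical numbers): with a
 * DTIM period of 3 and a last table entry of 10, skipping DTIMs gives
 * max_sleep = (10 / 3) * 3 = 9, i.e. the largest multiple of the DTIM
 * period that still fits; every sleep_interval[] entry above 9 is clamped.
 */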
2298
bb8c093b 2299static int iwl4965_send_power_mode(struct iwl4965_priv *priv, u32 mode)
b481de9c 2300{
9a62f73b 2301 u32 uninitialized_var(final_mode);
b481de9c 2302 int rc;
bb8c093b 2303 struct iwl4965_powertable_cmd cmd;
b481de9c
ZY
2304
2305 /* If on battery, set to 3,
01ebd063 2306 * if plugged into AC power, set to CAM ("continuously aware mode"),
b481de9c
ZY
2307 * else user level */
2308 switch (mode) {
2309 case IWL_POWER_BATTERY:
2310 final_mode = IWL_POWER_INDEX_3;
2311 break;
2312 case IWL_POWER_AC:
2313 final_mode = IWL_POWER_MODE_CAM;
2314 break;
2315 default:
2316 final_mode = mode;
2317 break;
2318 }
2319
2320 cmd.keep_alive_beacons = 0;
2321
bb8c093b 2322 iwl4965_update_power_cmd(priv, &cmd, final_mode);
b481de9c 2323
bb8c093b 2324 rc = iwl4965_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd);
b481de9c
ZY
2325
2326 if (final_mode == IWL_POWER_MODE_CAM)
2327 clear_bit(STATUS_POWER_PMI, &priv->status);
2328 else
2329 set_bit(STATUS_POWER_PMI, &priv->status);
2330
2331 return rc;
2332}
2333
bb8c093b 2334int iwl4965_is_network_packet(struct iwl4965_priv *priv, struct ieee80211_hdr *header)
b481de9c
ZY
2335{
2336 /* Filter incoming packets to determine if they are targeted toward
2337 * this network, discarding packets coming from ourselves */
2338 switch (priv->iw_mode) {
2339 case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */
2340 /* packets from our adapter are dropped (echo) */
2341 if (!compare_ether_addr(header->addr2, priv->mac_addr))
2342 return 0;
2343 /* {broad,multi}cast packets to our IBSS go through */
2344 if (is_multicast_ether_addr(header->addr1))
2345 return !compare_ether_addr(header->addr3, priv->bssid);
2346 /* packets to our adapter go through */
2347 return !compare_ether_addr(header->addr1, priv->mac_addr);
2348 case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */
2349 /* packets from our adapter are dropped (echo) */
2350 if (!compare_ether_addr(header->addr3, priv->mac_addr))
2351 return 0;
2352 /* {broad,multi}cast packets to our BSS go through */
2353 if (is_multicast_ether_addr(header->addr1))
2354 return !compare_ether_addr(header->addr2, priv->bssid);
2355 /* packets to our adapter go through */
2356 return !compare_ether_addr(header->addr1, priv->mac_addr);
2357 }
2358
2359 return 1;
2360}
2361
2362#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
2363
bb8c093b 2364static const char *iwl4965_get_tx_fail_reason(u32 status)
b481de9c
ZY
2365{
2366 switch (status & TX_STATUS_MSK) {
2367 case TX_STATUS_SUCCESS:
2368 return "SUCCESS";
2369 TX_STATUS_ENTRY(SHORT_LIMIT);
2370 TX_STATUS_ENTRY(LONG_LIMIT);
2371 TX_STATUS_ENTRY(FIFO_UNDERRUN);
2372 TX_STATUS_ENTRY(MGMNT_ABORT);
2373 TX_STATUS_ENTRY(NEXT_FRAG);
2374 TX_STATUS_ENTRY(LIFE_EXPIRE);
2375 TX_STATUS_ENTRY(DEST_PS);
2376 TX_STATUS_ENTRY(ABORTED);
2377 TX_STATUS_ENTRY(BT_RETRY);
2378 TX_STATUS_ENTRY(STA_INVALID);
2379 TX_STATUS_ENTRY(FRAG_DROPPED);
2380 TX_STATUS_ENTRY(TID_DISABLE);
2381 TX_STATUS_ENTRY(FRAME_FLUSHED);
2382 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
2383 TX_STATUS_ENTRY(TX_LOCKED);
2384 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
2385 }
2386
2387 return "UNKNOWN";
2388}
2389
2390/**
bb8c093b 2391 * iwl4965_scan_cancel - Cancel any currently executing HW scan
b481de9c
ZY
2392 *
2393 * NOTE: priv->mutex is not required before calling this function
2394 */
bb8c093b 2395static int iwl4965_scan_cancel(struct iwl4965_priv *priv)
b481de9c
ZY
2396{
2397 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
2398 clear_bit(STATUS_SCANNING, &priv->status);
2399 return 0;
2400 }
2401
2402 if (test_bit(STATUS_SCANNING, &priv->status)) {
2403 if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2404 IWL_DEBUG_SCAN("Queuing scan abort.\n");
2405 set_bit(STATUS_SCAN_ABORTING, &priv->status);
2406 queue_work(priv->workqueue, &priv->abort_scan);
2407
2408 } else
2409 IWL_DEBUG_SCAN("Scan abort already in progress.\n");
2410
2411 return test_bit(STATUS_SCANNING, &priv->status);
2412 }
2413
2414 return 0;
2415}
2416
2417/**
bb8c093b 2418 * iwl4965_scan_cancel_timeout - Cancel any currently executing HW scan
b481de9c
ZY
2419 * @ms: amount of time to wait (in milliseconds) for scan to abort
2420 *
2421 * NOTE: priv->mutex must be held before calling this function
2422 */
bb8c093b 2423static int iwl4965_scan_cancel_timeout(struct iwl4965_priv *priv, unsigned long ms)
b481de9c
ZY
2424{
2425 unsigned long now = jiffies;
2426 int ret;
2427
bb8c093b 2428 ret = iwl4965_scan_cancel(priv);
b481de9c
ZY
2429 if (ret && ms) {
2430 mutex_unlock(&priv->mutex);
2431 while (!time_after(jiffies, now + msecs_to_jiffies(ms)) &&
2432 test_bit(STATUS_SCANNING, &priv->status))
2433 msleep(1);
2434 mutex_lock(&priv->mutex);
2435
2436 return test_bit(STATUS_SCANNING, &priv->status);
2437 }
2438
2439 return ret;
2440}
2441
bb8c093b 2442static void iwl4965_sequence_reset(struct iwl4965_priv *priv)
b481de9c
ZY
2443{
2444 /* Reset ieee stats */
2445
2446 /* We don't reset the net_device_stats (ieee->stats) on
2447 * re-association */
2448
2449 priv->last_seq_num = -1;
2450 priv->last_frag_num = -1;
2451 priv->last_packet_time = 0;
2452
bb8c093b 2453 iwl4965_scan_cancel(priv);
b481de9c
ZY
2454}
2455
2456#define MAX_UCODE_BEACON_INTERVAL 4096
2457#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA)
2458
bb8c093b 2459static __le16 iwl4965_adjust_beacon_interval(u16 beacon_val)
b481de9c
ZY
2460{
2461 u16 new_val = 0;
2462 u16 beacon_factor = 0;
2463
2464 beacon_factor =
2465 (beacon_val + MAX_UCODE_BEACON_INTERVAL)
2466 / MAX_UCODE_BEACON_INTERVAL;
2467 new_val = beacon_val / beacon_factor;
2468
2469 return cpu_to_le16(new_val);
2470}
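/*
 * Example (illustrative): a beacon interval of 5000 TU exceeds
 * MAX_UCODE_BEACON_INTERVAL (4096), so beacon_factor = (5000 + 4096) / 4096
 * = 2 and the value handed to the uCode becomes 5000 / 2 = 2500 TU.
 */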
2471
bb8c093b 2472static void iwl4965_setup_rxon_timing(struct iwl4965_priv *priv)
b481de9c
ZY
2473{
2474 u64 interval_tm_unit;
2475 u64 tsf, result;
2476 unsigned long flags;
2477 struct ieee80211_conf *conf = NULL;
2478 u16 beacon_int = 0;
2479
2480 conf = ieee80211_get_hw_conf(priv->hw);
2481
2482 spin_lock_irqsave(&priv->lock, flags);
2483 priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp1);
2484 priv->rxon_timing.timestamp.dw[0] = cpu_to_le32(priv->timestamp0);
2485
2486 priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL;
2487
2488 tsf = priv->timestamp1;
2489 tsf = ((tsf << 32) | priv->timestamp0);
2490
2491 beacon_int = priv->beacon_int;
2492 spin_unlock_irqrestore(&priv->lock, flags);
2493
2494 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) {
2495 if (beacon_int == 0) {
2496 priv->rxon_timing.beacon_interval = cpu_to_le16(100);
2497 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
2498 } else {
2499 priv->rxon_timing.beacon_interval =
2500 cpu_to_le16(beacon_int);
2501 priv->rxon_timing.beacon_interval =
bb8c093b 2502 iwl4965_adjust_beacon_interval(
b481de9c
ZY
2503 le16_to_cpu(priv->rxon_timing.beacon_interval));
2504 }
2505
2506 priv->rxon_timing.atim_window = 0;
2507 } else {
2508 priv->rxon_timing.beacon_interval =
bb8c093b 2509 iwl4965_adjust_beacon_interval(conf->beacon_int);
b481de9c
ZY
2510 /* TODO: we need to get atim_window from upper stack
2511 * for now we set it to 0 */
2512 priv->rxon_timing.atim_window = 0;
2513 }
2514
2515 interval_tm_unit =
2516 (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024);
2517 result = do_div(tsf, interval_tm_unit);
2518 priv->rxon_timing.beacon_init_val =
2519 cpu_to_le32((u32) ((u64) interval_tm_unit - result));
2520
2521 IWL_DEBUG_ASSOC
2522 ("beacon interval %d beacon timer %d beacon tim %d\n",
2523 le16_to_cpu(priv->rxon_timing.beacon_interval),
2524 le32_to_cpu(priv->rxon_timing.beacon_init_val),
2525 le16_to_cpu(priv->rxon_timing.atim_window));
2526}
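/*
 * The beacon_init_val computed above is, in effect, the time remaining
 * until the next expected beacon: interval_tm_unit is the beacon interval
 * in usec, do_div() leaves the TSF offset into the current interval in
 * "result", and interval - result is the remaining usec. (A reading of the
 * code above, not a statement from hardware documentation.)
 */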
2527
bb8c093b 2528static int iwl4965_scan_initiate(struct iwl4965_priv *priv)
b481de9c
ZY
2529{
2530 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2531 IWL_ERROR("APs don't scan.\n");
2532 return 0;
2533 }
2534
bb8c093b 2535 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
2536 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
2537 return -EIO;
2538 }
2539
2540 if (test_bit(STATUS_SCANNING, &priv->status)) {
2541 IWL_DEBUG_SCAN("Scan already in progress.\n");
2542 return -EAGAIN;
2543 }
2544
2545 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2546 IWL_DEBUG_SCAN("Scan request while abort pending. "
2547 "Queuing.\n");
2548 return -EAGAIN;
2549 }
2550
2551 IWL_DEBUG_INFO("Starting scan...\n");
2552 priv->scan_bands = 2;
2553 set_bit(STATUS_SCANNING, &priv->status);
2554 priv->scan_start = jiffies;
2555 priv->scan_pass_start = priv->scan_start;
2556
2557 queue_work(priv->workqueue, &priv->request_scan);
2558
2559 return 0;
2560}
2561
bb8c093b 2562static int iwl4965_set_rxon_hwcrypto(struct iwl4965_priv *priv, int hw_decrypt)
b481de9c 2563{
bb8c093b 2564 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon;
b481de9c
ZY
2565
2566 if (hw_decrypt)
2567 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
2568 else
2569 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
2570
2571 return 0;
2572}
2573
bb8c093b 2574static void iwl4965_set_flags_for_phymode(struct iwl4965_priv *priv, u8 phymode)
b481de9c
ZY
2575{
2576 if (phymode == MODE_IEEE80211A) {
2577 priv->staging_rxon.flags &=
2578 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
2579 | RXON_FLG_CCK_MSK);
2580 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2581 } else {
bb8c093b 2582 /* Copied from iwl4965_bg_post_associate() */
b481de9c
ZY
2583 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
2584 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2585 else
2586 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2587
2588 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
2589 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2590
2591 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
2592 priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
2593 priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
2594 }
2595}
2596
2597/*
01ebd063 2598 * initialize rxon structure with default values from eeprom
b481de9c 2599 */
bb8c093b 2600static void iwl4965_connection_init_rx_config(struct iwl4965_priv *priv)
b481de9c 2601{
bb8c093b 2602 const struct iwl4965_channel_info *ch_info;
b481de9c
ZY
2603
2604 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
2605
2606 switch (priv->iw_mode) {
2607 case IEEE80211_IF_TYPE_AP:
2608 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
2609 break;
2610
2611 case IEEE80211_IF_TYPE_STA:
2612 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
2613 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
2614 break;
2615
2616 case IEEE80211_IF_TYPE_IBSS:
2617 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
2618 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
2619 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
2620 RXON_FILTER_ACCEPT_GRP_MSK;
2621 break;
2622
2623 case IEEE80211_IF_TYPE_MNTR:
2624 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
2625 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
2626 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
2627 break;
2628 }
2629
2630#if 0
2631 /* TODO: Figure out when short_preamble would be set and cache from
2632 * that */
2633 if (!hw_to_local(priv->hw)->short_preamble)
2634 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2635 else
2636 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2637#endif
2638
bb8c093b 2639 ch_info = iwl4965_get_channel_info(priv, priv->phymode,
b481de9c
ZY
2640 le16_to_cpu(priv->staging_rxon.channel));
2641
2642 if (!ch_info)
2643 ch_info = &priv->channel_info[0];
2644
2645 /*
2646 * in some case A channels are all non IBSS
2647 * in this case force B/G channel
2648 */
2649 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
2650 !(is_channel_ibss(ch_info)))
2651 ch_info = &priv->channel_info[0];
2652
2653 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
2654 if (is_channel_a_band(ch_info))
2655 priv->phymode = MODE_IEEE80211A;
2656 else
2657 priv->phymode = MODE_IEEE80211G;
2658
bb8c093b 2659 iwl4965_set_flags_for_phymode(priv, priv->phymode);
b481de9c
ZY
2660
2661 priv->staging_rxon.ofdm_basic_rates =
2662 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2663 priv->staging_rxon.cck_basic_rates =
2664 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2665
2666 priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
2667 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
2668 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2669 memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN);
2670 priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
2671 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
2672 iwl4965_set_rxon_chain(priv);
2673}
2674
bb8c093b 2675static int iwl4965_set_mode(struct iwl4965_priv *priv, int mode)
b481de9c 2676{
b481de9c 2677 if (mode == IEEE80211_IF_TYPE_IBSS) {
bb8c093b 2678 const struct iwl4965_channel_info *ch_info;
b481de9c 2679
bb8c093b 2680 ch_info = iwl4965_get_channel_info(priv,
b481de9c
ZY
2681 priv->phymode,
2682 le16_to_cpu(priv->staging_rxon.channel));
2683
2684 if (!ch_info || !is_channel_ibss(ch_info)) {
2685 IWL_ERROR("channel %d not IBSS channel\n",
2686 le16_to_cpu(priv->staging_rxon.channel));
2687 return -EINVAL;
2688 }
2689 }
2690
b481de9c
ZY
2691 priv->iw_mode = mode;
2692
bb8c093b 2693 iwl4965_connection_init_rx_config(priv);
b481de9c
ZY
2694 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2695
bb8c093b 2696 iwl4965_clear_stations_table(priv);
b481de9c 2697
fde3571f
MA
2698 /* dont commit rxon if rf-kill is on*/
2699 if (!iwl4965_is_ready_rf(priv))
2700 return -EAGAIN;
2701
2702 cancel_delayed_work(&priv->scan_check);
2703 if (iwl4965_scan_cancel_timeout(priv, 100)) {
2704 IWL_WARNING("Aborted scan still in progress after 100ms\n");
2705 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
2706 return -EAGAIN;
2707 }
2708
bb8c093b 2709 iwl4965_commit_rxon(priv);
b481de9c
ZY
2710
2711 return 0;
2712}
2713
bb8c093b 2714static void iwl4965_build_tx_cmd_hwcrypto(struct iwl4965_priv *priv,
b481de9c 2715 struct ieee80211_tx_control *ctl,
bb8c093b 2716 struct iwl4965_cmd *cmd,
b481de9c
ZY
2717 struct sk_buff *skb_frag,
2718 int last_frag)
2719{
bb8c093b 2720 struct iwl4965_hw_key *keyinfo = &priv->stations[ctl->key_idx].keyinfo;
b481de9c
ZY
2721
2722 switch (keyinfo->alg) {
2723 case ALG_CCMP:
2724 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM;
2725 memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen);
2726 IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
2727 break;
2728
2729 case ALG_TKIP:
2730#if 0
2731 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP;
2732
2733 if (last_frag)
2734 memcpy(cmd->cmd.tx.tkip_mic.byte, skb_frag->tail - 8,
2735 8);
2736 else
2737 memset(cmd->cmd.tx.tkip_mic.byte, 0, 8);
2738#endif
2739 break;
2740
2741 case ALG_WEP:
2742 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP |
2743 (ctl->key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
2744
2745 if (keyinfo->keylen == 13)
2746 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
2747
2748 memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen);
2749
2750 IWL_DEBUG_TX("Configuring packet for WEP encryption "
2751 "with key %d\n", ctl->key_idx);
2752 break;
2753
b481de9c
ZY
2754 default:
2755 printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg);
2756 break;
2757 }
2758}
2759
2760/*
2761 * handle build REPLY_TX command notification.
2762 */
bb8c093b
CH
2763static void iwl4965_build_tx_cmd_basic(struct iwl4965_priv *priv,
2764 struct iwl4965_cmd *cmd,
b481de9c
ZY
2765 struct ieee80211_tx_control *ctrl,
2766 struct ieee80211_hdr *hdr,
2767 int is_unicast, u8 std_id)
2768{
2769 __le16 *qc;
2770 u16 fc = le16_to_cpu(hdr->frame_control);
2771 __le32 tx_flags = cmd->cmd.tx.tx_flags;
2772
2773 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2774 if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) {
2775 tx_flags |= TX_CMD_FLG_ACK_MSK;
2776 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
2777 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2778 if (ieee80211_is_probe_response(fc) &&
2779 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
2780 tx_flags |= TX_CMD_FLG_TSF_MSK;
2781 } else {
2782 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
2783 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2784 }
2785
87e4f7df
TW
2786 if (ieee80211_is_back_request(fc))
2787 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
2788
2789
b481de9c
ZY
2790 cmd->cmd.tx.sta_id = std_id;
2791 if (ieee80211_get_morefrag(hdr))
2792 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2793
2794 qc = ieee80211_get_qos_ctrl(hdr);
2795 if (qc) {
2796 cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf);
2797 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2798 } else
2799 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2800
2801 if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) {
2802 tx_flags |= TX_CMD_FLG_RTS_MSK;
2803 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2804 } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) {
2805 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2806 tx_flags |= TX_CMD_FLG_CTS_MSK;
2807 }
2808
2809 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
2810 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2811
2812 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
2813 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
2814 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ ||
2815 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
bc434dd2 2816 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(3);
b481de9c 2817 else
bc434dd2 2818 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(2);
b481de9c
ZY
2819 } else
2820 cmd->cmd.tx.timeout.pm_frame_timeout = 0;
2821
2822 cmd->cmd.tx.driver_txop = 0;
2823 cmd->cmd.tx.tx_flags = tx_flags;
2824 cmd->cmd.tx.next_frame_len = 0;
2825}
2826
6440adb5
BC
2827/**
2828 * iwl4965_get_sta_id - Find station's index within station table
2829 *
2830 * If new IBSS station, create new entry in station table
2831 */
9fbab516
BC
2832static int iwl4965_get_sta_id(struct iwl4965_priv *priv,
2833 struct ieee80211_hdr *hdr)
b481de9c
ZY
2834{
2835 int sta_id;
2836 u16 fc = le16_to_cpu(hdr->frame_control);
0795af57 2837 DECLARE_MAC_BUF(mac);
b481de9c 2838
6440adb5 2839 /* If this frame is broadcast or management, use broadcast station id */
b481de9c
ZY
2840 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
2841 is_multicast_ether_addr(hdr->addr1))
2842 return priv->hw_setting.bcast_sta_id;
2843
2844 switch (priv->iw_mode) {
2845
6440adb5
BC
2846 /* If we are a client station in a BSS network, use the special
2847 * AP station entry (that's the only station we communicate with) */
b481de9c
ZY
2848 case IEEE80211_IF_TYPE_STA:
2849 return IWL_AP_ID;
2850
2851 /* If we are an AP, then find the station, or use BCAST */
2852 case IEEE80211_IF_TYPE_AP:
bb8c093b 2853 sta_id = iwl4965_hw_find_station(priv, hdr->addr1);
b481de9c
ZY
2854 if (sta_id != IWL_INVALID_STATION)
2855 return sta_id;
2856 return priv->hw_setting.bcast_sta_id;
2857
6440adb5
BC
2858 /* If this frame is going out to an IBSS network, find the station,
2859 * or create a new station table entry */
b481de9c 2860 case IEEE80211_IF_TYPE_IBSS:
bb8c093b 2861 sta_id = iwl4965_hw_find_station(priv, hdr->addr1);
b481de9c
ZY
2862 if (sta_id != IWL_INVALID_STATION)
2863 return sta_id;
2864
6440adb5 2865 /* Create new station table entry */
67d62035
RR
2866 sta_id = iwl4965_add_station_flags(priv, hdr->addr1,
2867 0, CMD_ASYNC, NULL);
b481de9c
ZY
2868
2869 if (sta_id != IWL_INVALID_STATION)
2870 return sta_id;
2871
0795af57 2872 IWL_DEBUG_DROP("Station %s not in station map. "
b481de9c 2873 "Defaulting to broadcast...\n",
0795af57 2874 print_mac(mac, hdr->addr1));
bb8c093b 2875 iwl4965_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
b481de9c
ZY
2876 return priv->hw_setting.bcast_sta_id;
2877
2878 default:
01ebd063 2879 IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode);
b481de9c
ZY
2880 return priv->hw_setting.bcast_sta_id;
2881 }
2882}
2883
2884/*
2885 * start REPLY_TX command process
2886 */
bb8c093b 2887static int iwl4965_tx_skb(struct iwl4965_priv *priv,
b481de9c
ZY
2888 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
2889{
2890 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bb8c093b 2891 struct iwl4965_tfd_frame *tfd;
b481de9c
ZY
2892 u32 *control_flags;
2893 int txq_id = ctl->queue;
bb8c093b
CH
2894 struct iwl4965_tx_queue *txq = NULL;
2895 struct iwl4965_queue *q = NULL;
b481de9c
ZY
2896 dma_addr_t phys_addr;
2897 dma_addr_t txcmd_phys;
87e4f7df 2898 dma_addr_t scratch_phys;
bb8c093b 2899 struct iwl4965_cmd *out_cmd = NULL;
b481de9c
ZY
2900 u16 len, idx, len_org;
2901 u8 id, hdr_len, unicast;
2902 u8 sta_id;
2903 u16 seq_number = 0;
2904 u16 fc;
2905 __le16 *qc;
2906 u8 wait_write_ptr = 0;
2907 unsigned long flags;
2908 int rc;
2909
2910 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 2911 if (iwl4965_is_rfkill(priv)) {
b481de9c
ZY
2912 IWL_DEBUG_DROP("Dropping - RF KILL\n");
2913 goto drop_unlock;
2914 }
2915
32bfd35d
JB
2916 if (!priv->vif) {
2917 IWL_DEBUG_DROP("Dropping - !priv->vif\n");
b481de9c
ZY
2918 goto drop_unlock;
2919 }
2920
2921 if ((ctl->tx_rate & 0xFF) == IWL_INVALID_RATE) {
2922 IWL_ERROR("ERROR: No TX rate available.\n");
2923 goto drop_unlock;
2924 }
2925
2926 unicast = !is_multicast_ether_addr(hdr->addr1);
2927 id = 0;
2928
2929 fc = le16_to_cpu(hdr->frame_control);
2930
c8b0e6e1 2931#ifdef CONFIG_IWL4965_DEBUG
b481de9c
ZY
2932 if (ieee80211_is_auth(fc))
2933 IWL_DEBUG_TX("Sending AUTH frame\n");
2934 else if (ieee80211_is_assoc_request(fc))
2935 IWL_DEBUG_TX("Sending ASSOC frame\n");
2936 else if (ieee80211_is_reassoc_request(fc))
2937 IWL_DEBUG_TX("Sending REASSOC frame\n");
2938#endif
2939
7878a5a4 2940 /* drop all data frames if we are not associated */
76f3915b
GG
2941 if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
2942 (!iwl4965_is_associated(priv) ||
2943 !priv->assoc_id ||
2944 !priv->assoc_station_added)) {
bb8c093b 2945 IWL_DEBUG_DROP("Dropping - !iwl4965_is_associated\n");
b481de9c
ZY
2946 goto drop_unlock;
2947 }
2948
2949 spin_unlock_irqrestore(&priv->lock, flags);
2950
2951 hdr_len = ieee80211_get_hdrlen(fc);
6440adb5
BC
2952
2953 /* Find (or create) index into station table for destination station */
bb8c093b 2954 sta_id = iwl4965_get_sta_id(priv, hdr);
b481de9c 2955 if (sta_id == IWL_INVALID_STATION) {
0795af57
JP
2956 DECLARE_MAC_BUF(mac);
2957
2958 IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
2959 print_mac(mac, hdr->addr1));
b481de9c
ZY
2960 goto drop;
2961 }
2962
2963 IWL_DEBUG_RATE("station Id %d\n", sta_id);
2964
2965 qc = ieee80211_get_qos_ctrl(hdr);
2966 if (qc) {
2967 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2968 seq_number = priv->stations[sta_id].tid[tid].seq_number &
2969 IEEE80211_SCTL_SEQ;
2970 hdr->seq_ctrl = cpu_to_le16(seq_number) |
2971 (hdr->seq_ctrl &
2972 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
2973 seq_number += 0x10;
c8b0e6e1
CH
2974#ifdef CONFIG_IWL4965_HT
2975#ifdef CONFIG_IWL4965_HT_AGG
b481de9c
ZY
2976 /* aggregation is on for this <sta,tid> */
2977 if (ctl->flags & IEEE80211_TXCTL_HT_MPDU_AGG)
2978 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
c8b0e6e1
CH
2979#endif /* CONFIG_IWL4965_HT_AGG */
2980#endif /* CONFIG_IWL4965_HT */
b481de9c 2981 }
6440adb5
BC
2982
2983 /* Descriptor for chosen Tx queue */
b481de9c
ZY
2984 txq = &priv->txq[txq_id];
2985 q = &txq->q;
2986
2987 spin_lock_irqsave(&priv->lock, flags);
2988
6440adb5 2989 /* Set up first empty TFD within this queue's circular TFD buffer */
fc4b6853 2990 tfd = &txq->bd[q->write_ptr];
b481de9c
ZY
2991 memset(tfd, 0, sizeof(*tfd));
2992 control_flags = (u32 *) tfd;
fc4b6853 2993 idx = get_cmd_index(q, q->write_ptr, 0);
b481de9c 2994
6440adb5 2995 /* Set up driver data for this TFD */
bb8c093b 2996 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl4965_tx_info));
fc4b6853
TW
2997 txq->txb[q->write_ptr].skb[0] = skb;
2998 memcpy(&(txq->txb[q->write_ptr].status.control),
b481de9c 2999 ctl, sizeof(struct ieee80211_tx_control));
6440adb5
BC
3000
3001 /* Set up first empty entry in queue's array of Tx/cmd buffers */
b481de9c
ZY
3002 out_cmd = &txq->cmd[idx];
3003 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
3004 memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx));
6440adb5
BC
3005
3006 /*
3007 * Set up the Tx-command (not MAC!) header.
3008 * Store the chosen Tx queue and TFD index within the sequence field;
3009 * after Tx, uCode's Tx response will return this value so driver can
3010 * locate the frame within the tx queue and do post-tx processing.
3011 */
b481de9c
ZY
3012 out_cmd->hdr.cmd = REPLY_TX;
3013 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
fc4b6853 3014 INDEX_TO_SEQ(q->write_ptr)));
6440adb5
BC
3015
3016 /* Copy MAC header from skb into command buffer */
b481de9c
ZY
3017 memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len);
3018
6440adb5
BC
3019 /*
3020 * Use the first empty entry in this queue's command buffer array
3021 * to contain the Tx command and MAC header concatenated together
3022 * (payload data will be in another buffer).
3023 * Size of this varies, due to varying MAC header length.
3024 * If end is not dword aligned, we'll have 2 extra bytes at the end
3025 * of the MAC header (device reads on dword boundaries).
3026 * We'll tell device about this padding later.
3027 */
b481de9c 3028 len = priv->hw_setting.tx_cmd_len +
bb8c093b 3029 sizeof(struct iwl4965_cmd_header) + hdr_len;
b481de9c
ZY
3030
3031 len_org = len;
3032 len = (len + 3) & ~3;
3033
3034 if (len_org != len)
3035 len_org = 1;
3036 else
3037 len_org = 0;
3038
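	/* From here on len_org is reused as a flag: 1 means the dword
	 * alignment above added 2 bytes of padding after the MAC header
	 * (e.g. a 30-byte total rounds up to 32), and TX_CMD_FLG_MH_PAD_MSK
	 * is set further down to tell the device about it. */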
6440adb5
BC
3039 /* Physical address of this Tx command's header (not MAC header!),
3040 * within command buffer array. */
bb8c093b
CH
3041 txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl4965_cmd) * idx +
3042 offsetof(struct iwl4965_cmd, hdr);
b481de9c 3043
6440adb5
BC
3044 /* Add buffer containing Tx command and MAC(!) header to TFD's
3045 * first entry */
bb8c093b 3046 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
b481de9c
ZY
3047
3048 if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT))
bb8c093b 3049 iwl4965_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, 0);
b481de9c 3050
6440adb5
BC
3051 /* Set up TFD's 2nd entry to point directly to remainder of skb,
3052 * if any (802.11 null frames have no payload). */
b481de9c
ZY
3053 len = skb->len - hdr_len;
3054 if (len) {
3055 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
3056 len, PCI_DMA_TODEVICE);
bb8c093b 3057 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
b481de9c
ZY
3058 }
3059
6440adb5 3060 /* Tell 4965 about any 2-byte padding after MAC header */
b481de9c
ZY
3061 if (len_org)
3062 out_cmd->cmd.tx.tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
3063
6440adb5 3064 /* Total # bytes to be transmitted */
b481de9c
ZY
3065 len = (u16)skb->len;
3066 out_cmd->cmd.tx.len = cpu_to_le16(len);
3067
3068 /* TODO need this for burst mode later on */
bb8c093b 3069 iwl4965_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id);
b481de9c
ZY
3070
3071 /* set is_hcca to 0; it probably will never be implemented */
bb8c093b 3072 iwl4965_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0);
b481de9c 3073
87e4f7df
TW
3074 scratch_phys = txcmd_phys + sizeof(struct iwl4965_cmd_header) +
3075 offsetof(struct iwl4965_tx_cmd, scratch);
3076 out_cmd->cmd.tx.dram_lsb_ptr = cpu_to_le32(scratch_phys);
3077 out_cmd->cmd.tx.dram_msb_ptr = iwl_get_dma_hi_address(scratch_phys);
3078
3079#ifdef CONFIG_IWL4965_HT_AGG
3080#ifdef CONFIG_IWL4965_HT
3081 /* TODO: move this functionality to rate scaling */
3082 iwl4965_tl_get_stats(priv, hdr);
3083#endif /* CONFIG_IWL4965_HT_AGG */
3084#endif /*CONFIG_IWL4965_HT */
3085
b481de9c
ZY
3086
3087 if (!ieee80211_get_morefrag(hdr)) {
3088 txq->need_update = 1;
3089 if (qc) {
3090 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
3091 priv->stations[sta_id].tid[tid].seq_number = seq_number;
3092 }
3093 } else {
3094 wait_write_ptr = 1;
3095 txq->need_update = 0;
3096 }
3097
bb8c093b 3098 iwl4965_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload,
b481de9c
ZY
3099 sizeof(out_cmd->cmd.tx));
3100
bb8c093b 3101 iwl4965_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
b481de9c
ZY
3102 ieee80211_get_hdrlen(fc));
3103
6440adb5 3104 /* Set up entry for this TFD in Tx byte-count array */
b481de9c
ZY
3105 iwl4965_tx_queue_update_wr_ptr(priv, txq, len);
3106
6440adb5 3107 /* Tell device the write index *just past* this latest filled TFD */
bb8c093b
CH
3108 q->write_ptr = iwl4965_queue_inc_wrap(q->write_ptr, q->n_bd);
3109 rc = iwl4965_tx_queue_update_write_ptr(priv, txq);
b481de9c
ZY
3110 spin_unlock_irqrestore(&priv->lock, flags);
3111
3112 if (rc)
3113 return rc;
3114
bb8c093b 3115 if ((iwl4965_queue_space(q) < q->high_mark)
b481de9c
ZY
3116 && priv->mac80211_registered) {
3117 if (wait_write_ptr) {
3118 spin_lock_irqsave(&priv->lock, flags);
3119 txq->need_update = 1;
bb8c093b 3120 iwl4965_tx_queue_update_write_ptr(priv, txq);
b481de9c
ZY
3121 spin_unlock_irqrestore(&priv->lock, flags);
3122 }
3123
3124 ieee80211_stop_queue(priv->hw, ctl->queue);
3125 }
3126
3127 return 0;
3128
3129drop_unlock:
3130 spin_unlock_irqrestore(&priv->lock, flags);
3131drop:
3132 return -1;
3133}
3134
bb8c093b 3135static void iwl4965_set_rate(struct iwl4965_priv *priv)
b481de9c
ZY
3136{
3137 const struct ieee80211_hw_mode *hw = NULL;
3138 struct ieee80211_rate *rate;
3139 int i;
3140
bb8c093b 3141 hw = iwl4965_get_hw_mode(priv, priv->phymode);
c4ba9621
SA
3142 if (!hw) {
3143 IWL_ERROR("Failed to set rate: unable to get hw mode\n");
3144 return;
3145 }
b481de9c
ZY
3146
3147 priv->active_rate = 0;
3148 priv->active_rate_basic = 0;
3149
3150 IWL_DEBUG_RATE("Setting rates for 802.11%c\n",
3151 hw->mode == MODE_IEEE80211A ?
3152 'a' : ((hw->mode == MODE_IEEE80211B) ? 'b' : 'g'));
3153
3154 for (i = 0; i < hw->num_rates; i++) {
3155 rate = &(hw->rates[i]);
3156 if ((rate->val < IWL_RATE_COUNT) &&
3157 (rate->flags & IEEE80211_RATE_SUPPORTED)) {
3158 IWL_DEBUG_RATE("Adding rate index %d (plcp %d)%s\n",
bb8c093b 3159 rate->val, iwl4965_rates[rate->val].plcp,
b481de9c
ZY
3160 (rate->flags & IEEE80211_RATE_BASIC) ?
3161 "*" : "");
3162 priv->active_rate |= (1 << rate->val);
3163 if (rate->flags & IEEE80211_RATE_BASIC)
3164 priv->active_rate_basic |= (1 << rate->val);
3165 } else
3166 IWL_DEBUG_RATE("Not adding rate %d (plcp %d)\n",
bb8c093b 3167 rate->val, iwl4965_rates[rate->val].plcp);
b481de9c
ZY
3168 }
3169
3170 IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
3171 priv->active_rate, priv->active_rate_basic);
3172
3173 /*
3174 * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK)
3175 * otherwise set it to the default of all CCK rates and 6, 12, 24 for
3176 * OFDM
3177 */
3178 if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
3179 priv->staging_rxon.cck_basic_rates =
3180 ((priv->active_rate_basic &
3181 IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
3182 else
3183 priv->staging_rxon.cck_basic_rates =
3184 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
3185
3186 if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
3187 priv->staging_rxon.ofdm_basic_rates =
3188 ((priv->active_rate_basic &
3189 (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
3190 IWL_FIRST_OFDM_RATE) & 0xFF;
3191 else
3192 priv->staging_rxon.ofdm_basic_rates =
3193 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
3194}
3195
bb8c093b 3196static void iwl4965_radio_kill_sw(struct iwl4965_priv *priv, int disable_radio)
b481de9c
ZY
3197{
3198 unsigned long flags;
3199
3200 if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status))
3201 return;
3202
3203 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n",
3204 disable_radio ? "OFF" : "ON");
3205
3206 if (disable_radio) {
bb8c093b 3207 iwl4965_scan_cancel(priv);
b481de9c
ZY
3208 /* FIXME: This is a workaround for AP */
3209 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
3210 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 3211 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c
ZY
3212 CSR_UCODE_SW_BIT_RFKILL);
3213 spin_unlock_irqrestore(&priv->lock, flags);
bb8c093b 3214 iwl4965_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0);
b481de9c
ZY
3215 set_bit(STATUS_RF_KILL_SW, &priv->status);
3216 }
3217 return;
3218 }
3219
3220 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 3221 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
b481de9c
ZY
3222
3223 clear_bit(STATUS_RF_KILL_SW, &priv->status);
3224 spin_unlock_irqrestore(&priv->lock, flags);
3225
3226 /* wake up ucode */
3227 msleep(10);
3228
3229 spin_lock_irqsave(&priv->lock, flags);
bb8c093b
CH
3230 iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
3231 if (!iwl4965_grab_nic_access(priv))
3232 iwl4965_release_nic_access(priv);
b481de9c
ZY
3233 spin_unlock_irqrestore(&priv->lock, flags);
3234
3235 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
3236 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
3237 "disabled by HW switch\n");
3238 return;
3239 }
3240
3241 queue_work(priv->workqueue, &priv->restart);
3242 return;
3243}
3244
bb8c093b 3245void iwl4965_set_decrypted_flag(struct iwl4965_priv *priv, struct sk_buff *skb,
b481de9c
ZY
3246 u32 decrypt_res, struct ieee80211_rx_status *stats)
3247{
3248 u16 fc =
3249 le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);
3250
3251 if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
3252 return;
3253
3254 if (!(fc & IEEE80211_FCTL_PROTECTED))
3255 return;
3256
3257 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
3258 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
3259 case RX_RES_STATUS_SEC_TYPE_TKIP:
3260 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3261 RX_RES_STATUS_BAD_ICV_MIC)
3262 stats->flag |= RX_FLAG_MMIC_ERROR;
3263 case RX_RES_STATUS_SEC_TYPE_WEP:
3264 case RX_RES_STATUS_SEC_TYPE_CCMP:
3265 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3266 RX_RES_STATUS_DECRYPT_OK) {
3267 IWL_DEBUG_RX("hw decrypt successfully!!!\n");
3268 stats->flag |= RX_FLAG_DECRYPTED;
3269 }
3270 break;
3271
3272 default:
3273 break;
3274 }
3275}
3276
b481de9c
ZY
3277
3278#define IWL_PACKET_RETRY_TIME HZ
3279
bb8c093b 3280int iwl4965_is_duplicate_packet(struct iwl4965_priv *priv, struct ieee80211_hdr *header)
b481de9c
ZY
3281{
3282 u16 sc = le16_to_cpu(header->seq_ctrl);
3283 u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
3284 u16 frag = sc & IEEE80211_SCTL_FRAG;
3285 u16 *last_seq, *last_frag;
3286 unsigned long *last_time;
3287
3288 switch (priv->iw_mode) {
3289 case IEEE80211_IF_TYPE_IBSS:{
3290 struct list_head *p;
bb8c093b 3291 struct iwl4965_ibss_seq *entry = NULL;
b481de9c
ZY
3292 u8 *mac = header->addr2;
3293 int index = mac[5] & (IWL_IBSS_MAC_HASH_SIZE - 1);
3294
3295 __list_for_each(p, &priv->ibss_mac_hash[index]) {
bb8c093b 3296 entry = list_entry(p, struct iwl4965_ibss_seq, list);
b481de9c
ZY
3297 if (!compare_ether_addr(entry->mac, mac))
3298 break;
3299 }
3300 if (p == &priv->ibss_mac_hash[index]) {
3301 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
3302 if (!entry) {
bc434dd2 3303 IWL_ERROR("Cannot malloc new mac entry\n");
b481de9c
ZY
3304 return 0;
3305 }
3306 memcpy(entry->mac, mac, ETH_ALEN);
3307 entry->seq_num = seq;
3308 entry->frag_num = frag;
3309 entry->packet_time = jiffies;
bc434dd2 3310 list_add(&entry->list, &priv->ibss_mac_hash[index]);
b481de9c
ZY
3311 return 0;
3312 }
3313 last_seq = &entry->seq_num;
3314 last_frag = &entry->frag_num;
3315 last_time = &entry->packet_time;
3316 break;
3317 }
3318 case IEEE80211_IF_TYPE_STA:
3319 last_seq = &priv->last_seq_num;
3320 last_frag = &priv->last_frag_num;
3321 last_time = &priv->last_packet_time;
3322 break;
3323 default:
3324 return 0;
3325 }
3326 if ((*last_seq == seq) &&
3327 time_after(*last_time + IWL_PACKET_RETRY_TIME, jiffies)) {
3328 if (*last_frag == frag)
3329 goto drop;
3330 if (*last_frag + 1 != frag)
3331 /* out-of-order fragment */
3332 goto drop;
3333 } else
3334 *last_seq = seq;
3335
3336 *last_frag = frag;
3337 *last_time = jiffies;
3338 return 0;
3339
3340 drop:
3341 return 1;
3342}
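/*
 * Note on the IBSS case above: the hash bucket is chosen as
 * mac[5] & (IWL_IBSS_MAC_HASH_SIZE - 1), which assumes
 * IWL_IBSS_MAC_HASH_SIZE is a power of two; duplicates are then detected
 * per transmitter by comparing sequence and fragment numbers within
 * IWL_PACKET_RETRY_TIME (one second).
 */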
3343
c8b0e6e1 3344#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
b481de9c
ZY
3345
3346#include "iwl-spectrum.h"
3347
3348#define BEACON_TIME_MASK_LOW 0x00FFFFFF
3349#define BEACON_TIME_MASK_HIGH 0xFF000000
3350#define TIME_UNIT 1024
3351
3352/*
3353 * extended beacon time format
3354 * time in usec will be changed into a 32-bit value in 8:24 format
3355 * the high 1 byte is the beacon counts
3356 * the lower 3 bytes is the time in usec within one beacon interval
3357 */
3358
bb8c093b 3359static u32 iwl4965_usecs_to_beacons(u32 usec, u32 beacon_interval)
b481de9c
ZY
3360{
3361 u32 quot;
3362 u32 rem;
3363 u32 interval = beacon_interval * 1024;
3364
3365 if (!interval || !usec)
3366 return 0;
3367
3368 quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
3369 rem = (usec % interval) & BEACON_TIME_MASK_LOW;
3370
3371 return (quot << 24) + rem;
3372}
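/*
 * Worked example (hypothetical numbers): with beacon_interval = 100 TU the
 * interval is 102400 usec.  For usec = 250000 the quotient is 2 and the
 * remainder is 45200 (0xB090), so the encoded 8:24 value is
 * (2 << 24) + 45200 = 0x0200B090: two full beacon intervals plus 45200 usec.
 */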
3373
 3374/* 'base' is usually the value we get from the uCode with each received
 3375 * frame; it is the same as the HW timer counter, which counts down.
3376 */
3377
bb8c093b 3378static __le32 iwl4965_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
b481de9c
ZY
3379{
3380 u32 base_low = base & BEACON_TIME_MASK_LOW;
3381 u32 addon_low = addon & BEACON_TIME_MASK_LOW;
3382 u32 interval = beacon_interval * TIME_UNIT;
3383 u32 res = (base & BEACON_TIME_MASK_HIGH) +
3384 (addon & BEACON_TIME_MASK_HIGH);
3385
3386 if (base_low > addon_low)
3387 res += base_low - addon_low;
3388 else if (base_low < addon_low) {
3389 res += interval + base_low - addon_low;
3390 res += (1 << 24);
3391 } else
3392 res += (1 << 24);
3393
3394 return cpu_to_le32(res);
3395}
3396
bb8c093b 3397static int iwl4965_get_measurement(struct iwl4965_priv *priv,
b481de9c
ZY
3398 struct ieee80211_measurement_params *params,
3399 u8 type)
3400{
bb8c093b
CH
3401 struct iwl4965_spectrum_cmd spectrum;
3402 struct iwl4965_rx_packet *res;
3403 struct iwl4965_host_cmd cmd = {
b481de9c
ZY
3404 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
3405 .data = (void *)&spectrum,
3406 .meta.flags = CMD_WANT_SKB,
3407 };
3408 u32 add_time = le64_to_cpu(params->start_time);
3409 int rc;
3410 int spectrum_resp_status;
3411 int duration = le16_to_cpu(params->duration);
3412
bb8c093b 3413 if (iwl4965_is_associated(priv))
b481de9c 3414 add_time =
bb8c093b 3415 iwl4965_usecs_to_beacons(
b481de9c
ZY
3416 le64_to_cpu(params->start_time) - priv->last_tsf,
3417 le16_to_cpu(priv->rxon_timing.beacon_interval));
3418
3419 memset(&spectrum, 0, sizeof(spectrum));
3420
3421 spectrum.channel_count = cpu_to_le16(1);
3422 spectrum.flags =
3423 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
3424 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
3425 cmd.len = sizeof(spectrum);
3426 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
3427
bb8c093b 3428 if (iwl4965_is_associated(priv))
b481de9c 3429 spectrum.start_time =
bb8c093b 3430 iwl4965_add_beacon_time(priv->last_beacon_time,
b481de9c
ZY
3431 add_time,
3432 le16_to_cpu(priv->rxon_timing.beacon_interval));
3433 else
3434 spectrum.start_time = 0;
3435
3436 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
3437 spectrum.channels[0].channel = params->channel;
3438 spectrum.channels[0].type = type;
3439 if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)
3440 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
3441 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
3442
bb8c093b 3443 rc = iwl4965_send_cmd_sync(priv, &cmd);
b481de9c
ZY
3444 if (rc)
3445 return rc;
3446
bb8c093b 3447 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
b481de9c
ZY
3448 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
3449 IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n");
3450 rc = -EIO;
3451 }
3452
3453 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
3454 switch (spectrum_resp_status) {
3455 case 0: /* Command will be handled */
3456 if (res->u.spectrum.id != 0xff) {
3457 IWL_DEBUG_INFO
3458 ("Replaced existing measurement: %d\n",
3459 res->u.spectrum.id);
3460 priv->measurement_status &= ~MEASUREMENT_READY;
3461 }
3462 priv->measurement_status |= MEASUREMENT_ACTIVE;
3463 rc = 0;
3464 break;
3465
3466 case 1: /* Command will not be handled */
3467 rc = -EAGAIN;
3468 break;
3469 }
3470
3471 dev_kfree_skb_any(cmd.meta.u.skb);
3472
3473 return rc;
3474}
3475#endif
3476
bb8c093b
CH
3477static void iwl4965_txstatus_to_ieee(struct iwl4965_priv *priv,
3478 struct iwl4965_tx_info *tx_sta)
b481de9c
ZY
3479{
3480
3481 tx_sta->status.ack_signal = 0;
3482 tx_sta->status.excessive_retries = 0;
3483 tx_sta->status.queue_length = 0;
3484 tx_sta->status.queue_number = 0;
3485
3486 if (in_interrupt())
3487 ieee80211_tx_status_irqsafe(priv->hw,
3488 tx_sta->skb[0], &(tx_sta->status));
3489 else
3490 ieee80211_tx_status(priv->hw,
3491 tx_sta->skb[0], &(tx_sta->status));
3492
3493 tx_sta->skb[0] = NULL;
3494}
3495
3496/**
6440adb5 3497 * iwl4965_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
b481de9c 3498 *
6440adb5
BC
3499 * When FW advances 'R' index, all entries between old and new 'R' index
 3500 * need to be reclaimed, which frees up space in the queue.  If there is
 3501 * enough free space (above the low mark), wake the mac80211 queue that feeds us.
b481de9c 3502 */
bb8c093b 3503int iwl4965_tx_queue_reclaim(struct iwl4965_priv *priv, int txq_id, int index)
b481de9c 3504{
bb8c093b
CH
3505 struct iwl4965_tx_queue *txq = &priv->txq[txq_id];
3506 struct iwl4965_queue *q = &txq->q;
b481de9c
ZY
3507 int nfreed = 0;
3508
3509 if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) {
3510 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
3511 "is out of range [0-%d] %d %d.\n", txq_id,
fc4b6853 3512 index, q->n_bd, q->write_ptr, q->read_ptr);
b481de9c
ZY
3513 return 0;
3514 }
3515
bb8c093b 3516 for (index = iwl4965_queue_inc_wrap(index, q->n_bd);
fc4b6853 3517 q->read_ptr != index;
bb8c093b 3518 q->read_ptr = iwl4965_queue_inc_wrap(q->read_ptr, q->n_bd)) {
b481de9c 3519 if (txq_id != IWL_CMD_QUEUE_NUM) {
bb8c093b 3520 iwl4965_txstatus_to_ieee(priv,
fc4b6853 3521 &(txq->txb[txq->q.read_ptr]));
bb8c093b 3522 iwl4965_hw_txq_free_tfd(priv, txq);
b481de9c
ZY
3523 } else if (nfreed > 1) {
3524 IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
fc4b6853 3525 q->write_ptr, q->read_ptr);
b481de9c
ZY
3526 queue_work(priv->workqueue, &priv->restart);
3527 }
3528 nfreed++;
3529 }
3530
bb8c093b 3531 if (iwl4965_queue_space(q) > q->low_mark && (txq_id >= 0) &&
b481de9c
ZY
3532 (txq_id != IWL_CMD_QUEUE_NUM) &&
3533 priv->mac80211_registered)
3534 ieee80211_wake_queue(priv->hw, txq_id);
3535
3536
3537 return nfreed;
3538}
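/*
 * Example of the wrap-around reclaim above (hypothetical queue state):
 * with q->n_bd = 256, q->read_ptr = 253 and index = 2, the loop frees
 * entries 253, 254, 255, 0, 1 and 2 and leaves read_ptr at 3.  On the
 * command queue, freeing more than one entry per completion means a host
 * command was skipped, so the driver schedules a restart.
 */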
3539
bb8c093b 3540static int iwl4965_is_tx_success(u32 status)
b481de9c
ZY
3541{
3542 status &= TX_STATUS_MSK;
3543 return (status == TX_STATUS_SUCCESS)
3544 || (status == TX_STATUS_DIRECT_DONE);
3545}
3546
3547/******************************************************************************
3548 *
3549 * Generic RX handler implementations
3550 *
3551 ******************************************************************************/
c8b0e6e1
CH
3552#ifdef CONFIG_IWL4965_HT
3553#ifdef CONFIG_IWL4965_HT_AGG
b481de9c 3554
bb8c093b 3555static inline int iwl4965_get_ra_sta_id(struct iwl4965_priv *priv,
b481de9c
ZY
3556 struct ieee80211_hdr *hdr)
3557{
3558 if (priv->iw_mode == IEEE80211_IF_TYPE_STA)
3559 return IWL_AP_ID;
3560 else {
3561 u8 *da = ieee80211_get_DA(hdr);
bb8c093b 3562 return iwl4965_hw_find_station(priv, da);
b481de9c
ZY
3563 }
3564}
3565
bb8c093b
CH
3566static struct ieee80211_hdr *iwl4965_tx_queue_get_hdr(
3567 struct iwl4965_priv *priv, int txq_id, int idx)
b481de9c
ZY
3568{
3569 if (priv->txq[txq_id].txb[idx].skb[0])
3570 return (struct ieee80211_hdr *)priv->txq[txq_id].
3571 txb[idx].skb[0]->data;
3572 return NULL;
3573}
3574
bb8c093b 3575static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
b481de9c
ZY
3576{
3577 __le32 *scd_ssn = (__le32 *)((u32 *)&tx_resp->status +
3578 tx_resp->frame_count);
3579 return le32_to_cpu(*scd_ssn) & MAX_SN;
3580
3581}
6440adb5
BC
3582
3583/**
 3584 * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
3585 */
bb8c093b
CH
3586static int iwl4965_tx_status_reply_tx(struct iwl4965_priv *priv,
3587 struct iwl4965_ht_agg *agg,
3588 struct iwl4965_tx_resp *tx_resp,
b481de9c
ZY
3589 u16 start_idx)
3590{
3591 u32 status;
3592 __le32 *frame_status = &tx_resp->status;
3593 struct ieee80211_tx_status *tx_status = NULL;
3594 struct ieee80211_hdr *hdr = NULL;
3595 int i, sh;
3596 int txq_id, idx;
3597 u16 seq;
3598
3599 if (agg->wait_for_ba)
6440adb5 3600 IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n");
b481de9c
ZY
3601
3602 agg->frame_count = tx_resp->frame_count;
3603 agg->start_idx = start_idx;
3604 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
3605 agg->bitmap0 = agg->bitmap1 = 0;
3606
6440adb5 3607 /* # frames attempted by Tx command */
b481de9c 3608 if (agg->frame_count == 1) {
6440adb5 3609 /* Only one frame was attempted; no block-ack will arrive */
bb8c093b 3610 struct iwl4965_tx_queue *txq;
b481de9c
ZY
3611 status = le32_to_cpu(frame_status[0]);
3612
3613 txq_id = agg->txq_id;
3614 txq = &priv->txq[txq_id];
3615 /* FIXME: code repetition */
3616 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d \n",
3617 agg->frame_count, agg->start_idx);
3618
fc4b6853 3619 tx_status = &(priv->txq[txq_id].txb[txq->q.read_ptr].status);
b481de9c
ZY
3620 tx_status->retry_count = tx_resp->failure_frame;
3621 tx_status->queue_number = status & 0xff;
3622 tx_status->queue_length = tx_resp->bt_kill_count;
3623 tx_status->queue_length |= tx_resp->failure_rts;
3624
bb8c093b 3625 tx_status->flags = iwl4965_is_tx_success(status) ?
b481de9c
ZY
3626 IEEE80211_TX_STATUS_ACK : 0;
3627 tx_status->control.tx_rate =
bb8c093b 3628 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags);
b481de9c
ZY
3629 /* FIXME: code repetition end */
3630
3631 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n",
3632 status & 0xff, tx_resp->failure_frame);
3633 IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n",
bb8c093b 3634 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags));
b481de9c
ZY
3635
3636 agg->wait_for_ba = 0;
3637 } else {
6440adb5 3638 /* Two or more frames were attempted; expect block-ack */
b481de9c
ZY
3639 u64 bitmap = 0;
3640 int start = agg->start_idx;
3641
6440adb5 3642 /* Construct bit-map of pending frames within Tx window */
b481de9c
ZY
3643 for (i = 0; i < agg->frame_count; i++) {
3644 u16 sc;
3645 status = le32_to_cpu(frame_status[i]);
3646 seq = status >> 16;
3647 idx = SEQ_TO_INDEX(seq);
3648 txq_id = SEQ_TO_QUEUE(seq);
3649
3650 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
3651 AGG_TX_STATE_ABORT_MSK))
3652 continue;
3653
3654 IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
3655 agg->frame_count, txq_id, idx);
3656
bb8c093b 3657 hdr = iwl4965_tx_queue_get_hdr(priv, txq_id, idx);
b481de9c
ZY
3658
3659 sc = le16_to_cpu(hdr->seq_ctrl);
3660 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
3661 IWL_ERROR("BUG_ON idx doesn't match seq control"
3662 " idx=%d, seq_idx=%d, seq=%d\n",
3663 idx, SEQ_TO_SN(sc),
3664 hdr->seq_ctrl);
3665 return -1;
3666 }
3667
3668 IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n",
3669 i, idx, SEQ_TO_SN(sc));
3670
3671 sh = idx - start;
3672 if (sh > 64) {
3673 sh = (start - idx) + 0xff;
3674 bitmap = bitmap << sh;
3675 sh = 0;
3676 start = idx;
3677 } else if (sh < -64)
3678 sh = 0xff - (start - idx);
3679 else if (sh < 0) {
3680 sh = start - idx;
3681 start = idx;
3682 bitmap = bitmap << sh;
3683 sh = 0;
3684 }
3685 bitmap |= (1 << sh);
3686 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n",
3687 start, (u32)(bitmap & 0xFFFFFFFF));
3688 }
3689
3690 agg->bitmap0 = bitmap & 0xFFFFFFFF;
3691 agg->bitmap1 = bitmap >> 32;
3692 agg->start_idx = start;
3693 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
3694 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%x\n",
3695 agg->frame_count, agg->start_idx,
3696 agg->bitmap0);
3697
3698 if (bitmap)
3699 agg->wait_for_ba = 1;
3700 }
3701 return 0;
3702}
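/*
 * Sketch of the bitmap built above (hypothetical indexes): with
 * agg->start_idx = 5 and frames reported at queue indexes 5, 6 and 8, the
 * shifts are 0, 1 and 3, so bitmap ends up as 0xB (bits 0, 1 and 3 set);
 * this pending-frame bitmap is intended to be compared later against the
 * bitmap carried in the receiver's block-ack.
 */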
3703#endif
3704#endif
3705
6440adb5
BC
3706/**
3707 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
3708 */
bb8c093b
CH
3709static void iwl4965_rx_reply_tx(struct iwl4965_priv *priv,
3710 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3711{
bb8c093b 3712 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3713 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3714 int txq_id = SEQ_TO_QUEUE(sequence);
3715 int index = SEQ_TO_INDEX(sequence);
bb8c093b 3716 struct iwl4965_tx_queue *txq = &priv->txq[txq_id];
b481de9c 3717 struct ieee80211_tx_status *tx_status;
bb8c093b 3718 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
b481de9c 3719 u32 status = le32_to_cpu(tx_resp->status);
c8b0e6e1
CH
3720#ifdef CONFIG_IWL4965_HT
3721#ifdef CONFIG_IWL4965_HT_AGG
b481de9c
ZY
3722 int tid, sta_id;
3723#endif
3724#endif
3725
3726 if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) {
3727 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
3728 "is out of range [0-%d] %d %d\n", txq_id,
fc4b6853
TW
3729 index, txq->q.n_bd, txq->q.write_ptr,
3730 txq->q.read_ptr);
b481de9c
ZY
3731 return;
3732 }
3733
c8b0e6e1
CH
3734#ifdef CONFIG_IWL4965_HT
3735#ifdef CONFIG_IWL4965_HT_AGG
b481de9c 3736 if (txq->sched_retry) {
bb8c093b 3737 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
b481de9c 3738 struct ieee80211_hdr *hdr =
bb8c093b
CH
3739 iwl4965_tx_queue_get_hdr(priv, txq_id, index);
3740 struct iwl4965_ht_agg *agg = NULL;
b481de9c
ZY
3741 __le16 *qc = ieee80211_get_qos_ctrl(hdr);
3742
3743 if (qc == NULL) {
3744 IWL_ERROR("BUG_ON qc is null!!!!\n");
3745 return;
3746 }
3747
3748 tid = le16_to_cpu(*qc) & 0xf;
3749
bb8c093b 3750 sta_id = iwl4965_get_ra_sta_id(priv, hdr);
b481de9c
ZY
3751 if (unlikely(sta_id == IWL_INVALID_STATION)) {
3752 IWL_ERROR("Station not known for\n");
3753 return;
3754 }
3755
3756 agg = &priv->stations[sta_id].tid[tid].agg;
3757
3758 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, index);
3759
3760 if ((tx_resp->frame_count == 1) &&
bb8c093b 3761 !iwl4965_is_tx_success(status)) {
b481de9c
ZY
3762 /* TODO: send BAR */
3763 }
3764
fc4b6853 3765 if ((txq->q.read_ptr != (scd_ssn & 0xff))) {
bb8c093b 3766 index = iwl4965_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
b481de9c
ZY
3767 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
3768 "%d index %d\n", scd_ssn , index);
bb8c093b 3769 iwl4965_tx_queue_reclaim(priv, txq_id, index);
b481de9c
ZY
3770 }
3771 } else {
c8b0e6e1
CH
3772#endif /* CONFIG_IWL4965_HT_AGG */
3773#endif /* CONFIG_IWL4965_HT */
fc4b6853 3774 tx_status = &(txq->txb[txq->q.read_ptr].status);
b481de9c
ZY
3775
3776 tx_status->retry_count = tx_resp->failure_frame;
3777 tx_status->queue_number = status;
3778 tx_status->queue_length = tx_resp->bt_kill_count;
3779 tx_status->queue_length |= tx_resp->failure_rts;
3780
3781 tx_status->flags =
bb8c093b 3782 iwl4965_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0;
b481de9c
ZY
3783
3784 tx_status->control.tx_rate =
bb8c093b 3785 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags);
b481de9c
ZY
3786
3787 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags 0x%x "
bb8c093b 3788 "retries %d\n", txq_id, iwl4965_get_tx_fail_reason(status),
b481de9c
ZY
3789 status, le32_to_cpu(tx_resp->rate_n_flags),
3790 tx_resp->failure_frame);
3791
3792 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
3793 if (index != -1)
bb8c093b 3794 iwl4965_tx_queue_reclaim(priv, txq_id, index);
c8b0e6e1
CH
3795#ifdef CONFIG_IWL4965_HT
3796#ifdef CONFIG_IWL4965_HT_AGG
b481de9c 3797 }
c8b0e6e1
CH
3798#endif /* CONFIG_IWL4965_HT_AGG */
3799#endif /* CONFIG_IWL4965_HT */
b481de9c
ZY
3800
3801 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
3802 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
3803}
3804
3805
bb8c093b
CH
3806static void iwl4965_rx_reply_alive(struct iwl4965_priv *priv,
3807 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3808{
bb8c093b
CH
3809 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3810 struct iwl4965_alive_resp *palive;
b481de9c
ZY
3811 struct delayed_work *pwork;
3812
3813 palive = &pkt->u.alive_frame;
3814
3815 IWL_DEBUG_INFO("Alive ucode status 0x%08X revision "
3816 "0x%01X 0x%01X\n",
3817 palive->is_valid, palive->ver_type,
3818 palive->ver_subtype);
3819
3820 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
3821 IWL_DEBUG_INFO("Initialization Alive received.\n");
3822 memcpy(&priv->card_alive_init,
3823 &pkt->u.alive_frame,
bb8c093b 3824 sizeof(struct iwl4965_init_alive_resp));
b481de9c
ZY
3825 pwork = &priv->init_alive_start;
3826 } else {
3827 IWL_DEBUG_INFO("Runtime Alive received.\n");
3828 memcpy(&priv->card_alive, &pkt->u.alive_frame,
bb8c093b 3829 sizeof(struct iwl4965_alive_resp));
b481de9c
ZY
3830 pwork = &priv->alive_start;
3831 }
3832
3833 /* We delay the ALIVE response by 5ms to
3834 * give the HW RF Kill time to activate... */
3835 if (palive->is_valid == UCODE_VALID_OK)
3836 queue_delayed_work(priv->workqueue, pwork,
3837 msecs_to_jiffies(5));
3838 else
3839 IWL_WARNING("uCode did not respond OK.\n");
3840}
3841
bb8c093b
CH
3842static void iwl4965_rx_reply_add_sta(struct iwl4965_priv *priv,
3843 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3844{
bb8c093b 3845 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3846
3847 IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
3848 return;
3849}
3850
bb8c093b
CH
3851static void iwl4965_rx_reply_error(struct iwl4965_priv *priv,
3852 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3853{
bb8c093b 3854 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3855
3856 IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) "
3857 "seq 0x%04X ser 0x%08X\n",
3858 le32_to_cpu(pkt->u.err_resp.error_type),
3859 get_cmd_string(pkt->u.err_resp.cmd_id),
3860 pkt->u.err_resp.cmd_id,
3861 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
3862 le32_to_cpu(pkt->u.err_resp.error_info));
3863}
3864
3865#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
3866
bb8c093b 3867static void iwl4965_rx_csa(struct iwl4965_priv *priv, struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3868{
bb8c093b
CH
3869 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3870 struct iwl4965_rxon_cmd *rxon = (void *)&priv->active_rxon;
3871 struct iwl4965_csa_notification *csa = &(pkt->u.csa_notif);
b481de9c
ZY
3872 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
3873 le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
3874 rxon->channel = csa->channel;
3875 priv->staging_rxon.channel = csa->channel;
3876}
3877
bb8c093b
CH
3878static void iwl4965_rx_spectrum_measure_notif(struct iwl4965_priv *priv,
3879 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3880{
c8b0e6e1 3881#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
bb8c093b
CH
3882 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3883 struct iwl4965_spectrum_notification *report = &(pkt->u.spectrum_notif);
b481de9c
ZY
3884
3885 if (!report->state) {
3886 IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO,
3887 "Spectrum Measure Notification: Start\n");
3888 return;
3889 }
3890
3891 memcpy(&priv->measure_report, report, sizeof(*report));
3892 priv->measurement_status |= MEASUREMENT_READY;
3893#endif
3894}
3895
bb8c093b
CH
3896static void iwl4965_rx_pm_sleep_notif(struct iwl4965_priv *priv,
3897 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3898{
c8b0e6e1 3899#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
3900 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3901 struct iwl4965_sleep_notification *sleep = &(pkt->u.sleep_notif);
b481de9c
ZY
3902 IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
3903 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
3904#endif
3905}
3906
bb8c093b
CH
3907static void iwl4965_rx_pm_debug_statistics_notif(struct iwl4965_priv *priv,
3908 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3909{
bb8c093b 3910 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3911 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
3912 "notification for %s:\n",
3913 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
bb8c093b 3914 iwl4965_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len));
b481de9c
ZY
3915}
3916
bb8c093b 3917static void iwl4965_bg_beacon_update(struct work_struct *work)
b481de9c 3918{
bb8c093b
CH
3919 struct iwl4965_priv *priv =
3920 container_of(work, struct iwl4965_priv, beacon_update);
b481de9c
ZY
3921 struct sk_buff *beacon;
3922
 3923 /* Pull updated AP beacon from mac80211; this will fail if not in AP mode */
32bfd35d 3924 beacon = ieee80211_beacon_get(priv->hw, priv->vif, NULL);
b481de9c
ZY
3925
3926 if (!beacon) {
3927 IWL_ERROR("update beacon failed\n");
3928 return;
3929 }
3930
3931 mutex_lock(&priv->mutex);
3932 /* new beacon skb is allocated every time; dispose previous.*/
3933 if (priv->ibss_beacon)
3934 dev_kfree_skb(priv->ibss_beacon);
3935
3936 priv->ibss_beacon = beacon;
3937 mutex_unlock(&priv->mutex);
3938
bb8c093b 3939 iwl4965_send_beacon_cmd(priv);
b481de9c
ZY
3940}
3941
bb8c093b
CH
3942static void iwl4965_rx_beacon_notif(struct iwl4965_priv *priv,
3943 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3944{
c8b0e6e1 3945#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
3946 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3947 struct iwl4965_beacon_notif *beacon = &(pkt->u.beacon_status);
3948 u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
b481de9c
ZY
3949
3950 IWL_DEBUG_RX("beacon status %x retries %d iss %d "
3951 "tsf %d %d rate %d\n",
3952 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
3953 beacon->beacon_notify_hdr.failure_frame,
3954 le32_to_cpu(beacon->ibss_mgr_status),
3955 le32_to_cpu(beacon->high_tsf),
3956 le32_to_cpu(beacon->low_tsf), rate);
3957#endif
3958
3959 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
3960 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
3961 queue_work(priv->workqueue, &priv->beacon_update);
3962}
3963
3964/* Service response to REPLY_SCAN_CMD (0x80) */
bb8c093b
CH
3965static void iwl4965_rx_reply_scan(struct iwl4965_priv *priv,
3966 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3967{
c8b0e6e1 3968#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
3969 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3970 struct iwl4965_scanreq_notification *notif =
3971 (struct iwl4965_scanreq_notification *)pkt->u.raw;
b481de9c
ZY
3972
3973 IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status);
3974#endif
3975}
3976
3977/* Service SCAN_START_NOTIFICATION (0x82) */
bb8c093b
CH
3978static void iwl4965_rx_scan_start_notif(struct iwl4965_priv *priv,
3979 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3980{
bb8c093b
CH
3981 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3982 struct iwl4965_scanstart_notification *notif =
3983 (struct iwl4965_scanstart_notification *)pkt->u.raw;
b481de9c
ZY
3984 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
3985 IWL_DEBUG_SCAN("Scan start: "
3986 "%d [802.11%s] "
3987 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
3988 notif->channel,
3989 notif->band ? "bg" : "a",
3990 notif->tsf_high,
3991 notif->tsf_low, notif->status, notif->beacon_timer);
3992}
3993
3994/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
bb8c093b
CH
3995static void iwl4965_rx_scan_results_notif(struct iwl4965_priv *priv,
3996 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3997{
bb8c093b
CH
3998 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3999 struct iwl4965_scanresults_notification *notif =
4000 (struct iwl4965_scanresults_notification *)pkt->u.raw;
b481de9c
ZY
4001
4002 IWL_DEBUG_SCAN("Scan ch.res: "
4003 "%d [802.11%s] "
4004 "(TSF: 0x%08X:%08X) - %d "
4005 "elapsed=%lu usec (%dms since last)\n",
4006 notif->channel,
4007 notif->band ? "bg" : "a",
4008 le32_to_cpu(notif->tsf_high),
4009 le32_to_cpu(notif->tsf_low),
4010 le32_to_cpu(notif->statistics[0]),
4011 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
4012 jiffies_to_msecs(elapsed_jiffies
4013 (priv->last_scan_jiffies, jiffies)));
4014
4015 priv->last_scan_jiffies = jiffies;
7878a5a4 4016 priv->next_scan_jiffies = 0;
b481de9c
ZY
4017}
4018
4019/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
bb8c093b
CH
4020static void iwl4965_rx_scan_complete_notif(struct iwl4965_priv *priv,
4021 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4022{
bb8c093b
CH
4023 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4024 struct iwl4965_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
b481de9c
ZY
4025
4026 IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
4027 scan_notif->scanned_channels,
4028 scan_notif->tsf_low,
4029 scan_notif->tsf_high, scan_notif->status);
4030
4031 /* The HW is no longer scanning */
4032 clear_bit(STATUS_SCAN_HW, &priv->status);
4033
4034 /* The scan completion notification came in, so kill that timer... */
4035 cancel_delayed_work(&priv->scan_check);
4036
4037 IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
4038 (priv->scan_bands == 2) ? "2.4" : "5.2",
4039 jiffies_to_msecs(elapsed_jiffies
4040 (priv->scan_pass_start, jiffies)));
4041
4042 /* Remove this scanned band from the list
4043 * of pending bands to scan */
4044 priv->scan_bands--;
4045
4046 /* If a request to abort was given, or the scan did not succeed
4047 * then we reset the scan state machine and terminate,
4048 * re-queuing another scan if one has been requested */
4049 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
4050 IWL_DEBUG_INFO("Aborted scan completed.\n");
4051 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
4052 } else {
4053 /* If there are more bands on this scan pass reschedule */
4054 if (priv->scan_bands > 0)
4055 goto reschedule;
4056 }
4057
4058 priv->last_scan_jiffies = jiffies;
7878a5a4 4059 priv->next_scan_jiffies = 0;
b481de9c
ZY
4060 IWL_DEBUG_INFO("Setting scan to off\n");
4061
4062 clear_bit(STATUS_SCANNING, &priv->status);
4063
4064 IWL_DEBUG_INFO("Scan took %dms\n",
4065 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
4066
4067 queue_work(priv->workqueue, &priv->scan_completed);
4068
4069 return;
4070
4071reschedule:
4072 priv->scan_pass_start = jiffies;
4073 queue_work(priv->workqueue, &priv->request_scan);
4074}
4075
4076/* Handle notification from uCode that card's power state is changing
4077 * due to software, hardware, or critical temperature RFKILL */
bb8c093b
CH
4078static void iwl4965_rx_card_state_notif(struct iwl4965_priv *priv,
4079 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4080{
bb8c093b 4081 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
4082 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
4083 unsigned long status = priv->status;
4084
4085 IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n",
4086 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
4087 (flags & SW_CARD_DISABLED) ? "Kill" : "On");
4088
4089 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
4090 RF_CARD_DISABLED)) {
4091
bb8c093b 4092 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c
ZY
4093 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4094
bb8c093b
CH
4095 if (!iwl4965_grab_nic_access(priv)) {
4096 iwl4965_write_direct32(
b481de9c
ZY
4097 priv, HBUS_TARG_MBX_C,
4098 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
4099
bb8c093b 4100 iwl4965_release_nic_access(priv);
b481de9c
ZY
4101 }
4102
4103 if (!(flags & RXON_CARD_DISABLED)) {
bb8c093b 4104 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR,
b481de9c 4105 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
bb8c093b
CH
4106 if (!iwl4965_grab_nic_access(priv)) {
4107 iwl4965_write_direct32(
b481de9c
ZY
4108 priv, HBUS_TARG_MBX_C,
4109 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
4110
bb8c093b 4111 iwl4965_release_nic_access(priv);
b481de9c
ZY
4112 }
4113 }
4114
4115 if (flags & RF_CARD_DISABLED) {
bb8c093b 4116 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c 4117 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
bb8c093b
CH
4118 iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
4119 if (!iwl4965_grab_nic_access(priv))
4120 iwl4965_release_nic_access(priv);
b481de9c
ZY
4121 }
4122 }
4123
4124 if (flags & HW_CARD_DISABLED)
4125 set_bit(STATUS_RF_KILL_HW, &priv->status);
4126 else
4127 clear_bit(STATUS_RF_KILL_HW, &priv->status);
4128
4129
4130 if (flags & SW_CARD_DISABLED)
4131 set_bit(STATUS_RF_KILL_SW, &priv->status);
4132 else
4133 clear_bit(STATUS_RF_KILL_SW, &priv->status);
4134
4135 if (!(flags & RXON_CARD_DISABLED))
bb8c093b 4136 iwl4965_scan_cancel(priv);
b481de9c
ZY
4137
4138 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
4139 test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
4140 (test_bit(STATUS_RF_KILL_SW, &status) !=
4141 test_bit(STATUS_RF_KILL_SW, &priv->status)))
4142 queue_work(priv->workqueue, &priv->rf_kill);
4143 else
4144 wake_up_interruptible(&priv->wait_command_queue);
4145}
4146
4147/**
bb8c093b 4148 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
b481de9c
ZY
4149 *
 4150 * Set up the RX handlers for each of the reply types sent from the uCode
4151 * to the host.
4152 *
4153 * This function chains into the hardware specific files for them to setup
4154 * any hardware specific handlers as well.
4155 */
bb8c093b 4156static void iwl4965_setup_rx_handlers(struct iwl4965_priv *priv)
b481de9c 4157{
bb8c093b
CH
4158 priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
4159 priv->rx_handlers[REPLY_ADD_STA] = iwl4965_rx_reply_add_sta;
4160 priv->rx_handlers[REPLY_ERROR] = iwl4965_rx_reply_error;
4161 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl4965_rx_csa;
b481de9c 4162 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
bb8c093b
CH
4163 iwl4965_rx_spectrum_measure_notif;
4164 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl4965_rx_pm_sleep_notif;
b481de9c 4165 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
bb8c093b
CH
4166 iwl4965_rx_pm_debug_statistics_notif;
4167 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
b481de9c 4168
9fbab516
BC
4169 /*
4170 * The same handler is used for both the REPLY to a discrete
4171 * statistics request from the host as well as for the periodic
4172 * statistics notifications (after received beacons) from the uCode.
b481de9c 4173 */
bb8c093b
CH
4174 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_hw_rx_statistics;
4175 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_hw_rx_statistics;
b481de9c 4176
bb8c093b
CH
4177 priv->rx_handlers[REPLY_SCAN_CMD] = iwl4965_rx_reply_scan;
4178 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl4965_rx_scan_start_notif;
b481de9c 4179 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
bb8c093b 4180 iwl4965_rx_scan_results_notif;
b481de9c 4181 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
bb8c093b
CH
4182 iwl4965_rx_scan_complete_notif;
4183 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl4965_rx_card_state_notif;
4184 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
b481de9c 4185
9fbab516 4186 /* Set up hardware specific Rx handlers */
bb8c093b 4187 iwl4965_hw_rx_handler_setup(priv);
b481de9c
ZY
4188}
4189
4190/**
bb8c093b 4191 * iwl4965_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
b481de9c
ZY
4192 * @rxb: Rx buffer to reclaim
4193 *
4194 * If an Rx buffer has an async callback associated with it the callback
4195 * will be executed. The attached skb (if present) will only be freed
4196 * if the callback returns 1
4197 */
bb8c093b
CH
4198static void iwl4965_tx_cmd_complete(struct iwl4965_priv *priv,
4199 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4200{
bb8c093b 4201 struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
b481de9c
ZY
4202 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
4203 int txq_id = SEQ_TO_QUEUE(sequence);
4204 int index = SEQ_TO_INDEX(sequence);
4205 int huge = sequence & SEQ_HUGE_FRAME;
4206 int cmd_index;
bb8c093b 4207 struct iwl4965_cmd *cmd;
b481de9c
ZY
4208
4209 /* If a Tx command is being handled and it isn't in the actual
 4210 * command queue, then a command routing bug has been introduced
4211 * in the queue management code. */
4212 if (txq_id != IWL_CMD_QUEUE_NUM)
4213 IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
4214 txq_id, pkt->hdr.cmd);
4215 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
4216
4217 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
4218 cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
4219
4220 /* Input error checking is done when commands are added to queue. */
4221 if (cmd->meta.flags & CMD_WANT_SKB) {
4222 cmd->meta.source->u.skb = rxb->skb;
4223 rxb->skb = NULL;
4224 } else if (cmd->meta.u.callback &&
4225 !cmd->meta.u.callback(priv, cmd, rxb->skb))
4226 rxb->skb = NULL;
4227
bb8c093b 4228 iwl4965_tx_queue_reclaim(priv, txq_id, index);
b481de9c
ZY
4229
4230 if (!(cmd->meta.flags & CMD_ASYNC)) {
4231 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4232 wake_up_interruptible(&priv->wait_command_queue);
4233 }
4234}
4235
4236/************************** RX-FUNCTIONS ****************************/
4237/*
4238 * Rx theory of operation
4239 *
9fbab516
BC
4240 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 4241 * each of which points to a Receive Buffer to be filled by the 4965. These get
4242 * used not only for Rx frames, but for any command response or notification
4243 * from the 4965. The driver and 4965 manage the Rx buffers by means
4244 * of indexes into the circular buffer.
b481de9c
ZY
4245 *
4246 * Rx Queue Indexes
4247 * The host/firmware share two index registers for managing the Rx buffers.
4248 *
4249 * The READ index maps to the first position that the firmware may be writing
4250 * to -- the driver can read up to (but not including) this position and get
4251 * good data.
4252 * The READ index is managed by the firmware once the card is enabled.
4253 *
4254 * The WRITE index maps to the last position the driver has read from -- the
4255 * position preceding WRITE is the last slot the firmware can place a packet.
4256 *
4257 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4258 * WRITE = READ.
4259 *
9fbab516 4260 * During initialization, the host sets up the READ queue position to the first
b481de9c
ZY
4261 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4262 *
9fbab516 4263 * When the firmware places a packet in a buffer, it will advance the READ index
b481de9c
ZY
4264 * and fire the RX interrupt. The driver can then query the READ index and
4265 * process as many packets as possible, moving the WRITE index forward as it
4266 * resets the Rx queue buffers with new memory.
4267 *
4268 * The management in the driver is as follows:
4269 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
4270 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
01ebd063 4271 * to replenish the iwl->rxq->rx_free.
bb8c093b 4272 * + In iwl4965_rx_replenish (scheduled) if 'processed' != 'read' then the
b481de9c
ZY
4273 * iwl->rxq is replenished and the READ INDEX is updated (updating the
4274 * 'processed' and 'read' driver indexes as well)
4275 * + A received packet is processed and handed to the kernel network stack,
4276 * detached from the iwl->rxq. The driver 'processed' index is updated.
4277 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
4278 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
4279 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
4280 * were enough free buffers and RX_STALLED is set it is cleared.
4281 *
4282 *
4283 * Driver sequence:
4284 *
9fbab516
BC
4285 * iwl4965_rx_queue_alloc() Allocates rx_free
4286 * iwl4965_rx_replenish() Replenishes rx_free list from rx_used, and calls
bb8c093b 4287 * iwl4965_rx_queue_restock
9fbab516 4288 * iwl4965_rx_queue_restock() Moves available buffers from rx_free into Rx
b481de9c
ZY
4289 * queue, updates firmware pointers, and updates
4290 * the WRITE index. If insufficient rx_free buffers
bb8c093b 4291 * are available, schedules iwl4965_rx_replenish
b481de9c
ZY
4292 *
4293 * -- enable interrupts --
9fbab516 4294 * ISR - iwl4965_rx() Detach iwl4965_rx_mem_buffers from pool up to the
b481de9c
ZY
4295 * READ INDEX, detaching the SKB from the pool.
4296 * Moves the packet buffer from queue to rx_used.
bb8c093b 4297 * Calls iwl4965_rx_queue_restock to refill any empty
b481de9c
ZY
4298 * slots.
4299 * ...
4300 *
4301 */
4302
4303/**
bb8c093b 4304 * iwl4965_rx_queue_space - Return number of free slots available in queue.
b481de9c 4305 */
bb8c093b 4306static int iwl4965_rx_queue_space(const struct iwl4965_rx_queue *q)
b481de9c
ZY
4307{
4308 int s = q->read - q->write;
4309 if (s <= 0)
4310 s += RX_QUEUE_SIZE;
 4311 /* keep a couple of slots free so a full queue is not mistaken for an empty one */
4312 s -= 2;
4313 if (s < 0)
4314 s = 0;
4315 return s;
4316}
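/*
 * Illustration of the arithmetic above (hypothetical indexes): with
 * RX_QUEUE_SIZE = 256, q->read = 200 and q->write = 190, s starts at 10 and
 * the function returns 8; with read = 5 and write = 250, s starts at -245,
 * wraps to 11 and 9 slots are reported.  The two slots held back keep a
 * completely full queue from looking identical to an empty one.
 */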
4317
4318/**
bb8c093b 4319 * iwl4965_rx_queue_update_write_ptr - Update the write pointer for the RX queue
b481de9c 4320 */
bb8c093b 4321int iwl4965_rx_queue_update_write_ptr(struct iwl4965_priv *priv, struct iwl4965_rx_queue *q)
b481de9c
ZY
4322{
4323 u32 reg = 0;
4324 int rc = 0;
4325 unsigned long flags;
4326
4327 spin_lock_irqsave(&q->lock, flags);
4328
4329 if (q->need_update == 0)
4330 goto exit_unlock;
4331
6440adb5 4332 /* If power-saving is in use, make sure device is awake */
b481de9c 4333 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
bb8c093b 4334 reg = iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
b481de9c
ZY
4335
4336 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
bb8c093b 4337 iwl4965_set_bit(priv, CSR_GP_CNTRL,
b481de9c
ZY
4338 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4339 goto exit_unlock;
4340 }
4341
bb8c093b 4342 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4343 if (rc)
4344 goto exit_unlock;
4345
6440adb5 4346 /* Device expects a multiple of 8 */
bb8c093b 4347 iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
b481de9c 4348 q->write & ~0x7);
bb8c093b 4349 iwl4965_release_nic_access(priv);
6440adb5
BC
4350
4351 /* Else device is assumed to be awake */
b481de9c 4352 } else
6440adb5 4353 /* Device expects a multiple of 8 */
bb8c093b 4354 iwl4965_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
b481de9c
ZY
4355
4356
4357 q->need_update = 0;
4358
4359 exit_unlock:
4360 spin_unlock_irqrestore(&q->lock, flags);
4361 return rc;
4362}
4363
4364/**
9fbab516 4365 * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
b481de9c 4366 */
bb8c093b 4367static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl4965_priv *priv,
b481de9c
ZY
4368 dma_addr_t dma_addr)
4369{
4370 return cpu_to_le32((u32)(dma_addr >> 8));
4371}
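/*
 * For illustration: a (hypothetical) DMA address of 0x3A4B5C00 is stored in
 * the RBD as 0x003A4B5C, i.e. the receive buffer address is expressed in
 * 256-byte units, which is why only the upper bits are kept.
 */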
4372
4373
4374/**
bb8c093b 4375 * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
b481de9c 4376 *
9fbab516 4377 * If there are slots in the RX queue that need to be restocked,
b481de9c 4378 * and we have free pre-allocated buffers, fill the ranks as much
9fbab516 4379 * as we can, pulling from rx_free.
b481de9c
ZY
4380 *
4381 * This moves the 'write' index forward to catch up with 'processed', and
4382 * also updates the memory address in the firmware to reference the new
4383 * target buffer.
4384 */
bb8c093b 4385static int iwl4965_rx_queue_restock(struct iwl4965_priv *priv)
b481de9c 4386{
bb8c093b 4387 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c 4388 struct list_head *element;
bb8c093b 4389 struct iwl4965_rx_mem_buffer *rxb;
b481de9c
ZY
4390 unsigned long flags;
4391 int write, rc;
4392
4393 spin_lock_irqsave(&rxq->lock, flags);
4394 write = rxq->write & ~0x7;
bb8c093b 4395 while ((iwl4965_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
6440adb5 4396 /* Get next free Rx buffer, remove from free list */
b481de9c 4397 element = rxq->rx_free.next;
bb8c093b 4398 rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list);
b481de9c 4399 list_del(element);
6440adb5
BC
4400
4401 /* Point to Rx buffer via next RBD in circular buffer */
bb8c093b 4402 rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv, rxb->dma_addr);
b481de9c
ZY
4403 rxq->queue[rxq->write] = rxb;
4404 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
4405 rxq->free_count--;
4406 }
4407 spin_unlock_irqrestore(&rxq->lock, flags);
4408 /* If the pre-allocated buffer pool is dropping low, schedule to
4409 * refill it */
4410 if (rxq->free_count <= RX_LOW_WATERMARK)
4411 queue_work(priv->workqueue, &priv->rx_replenish);
4412
4413
6440adb5
BC
4414 /* If we've added more space for the firmware to place data, tell it.
4415 * Increment device's write pointer in multiples of 8. */
b481de9c
ZY
4416 if ((write != (rxq->write & ~0x7))
4417 || (abs(rxq->write - rxq->read) > 7)) {
4418 spin_lock_irqsave(&rxq->lock, flags);
4419 rxq->need_update = 1;
4420 spin_unlock_irqrestore(&rxq->lock, flags);
bb8c093b 4421 rc = iwl4965_rx_queue_update_write_ptr(priv, rxq);
b481de9c
ZY
4422 if (rc)
4423 return rc;
4424 }
4425
4426 return 0;
4427}
4428
4429/**
bb8c093b 4430 * iwl4965_rx_replenish - Move all used packet from rx_used to rx_free
b481de9c
ZY
4431 *
4432 * When moving to rx_free an SKB is allocated for the slot.
4433 *
bb8c093b 4434 * Also restock the Rx queue via iwl4965_rx_queue_restock.
01ebd063 4435 * This is called as a scheduled work item (except during initialization).
b481de9c 4436 */
5c0eef96 4437static void iwl4965_rx_allocate(struct iwl4965_priv *priv)
b481de9c 4438{
bb8c093b 4439 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c 4440 struct list_head *element;
bb8c093b 4441 struct iwl4965_rx_mem_buffer *rxb;
b481de9c
ZY
4442 unsigned long flags;
4443 spin_lock_irqsave(&rxq->lock, flags);
4444 while (!list_empty(&rxq->rx_used)) {
4445 element = rxq->rx_used.next;
bb8c093b 4446 rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list);
6440adb5
BC
4447
4448 /* Alloc a new receive buffer */
b481de9c 4449 rxb->skb =
9ee1ba47
RR
4450 alloc_skb(priv->hw_setting.rx_buf_size,
4451 __GFP_NOWARN | GFP_ATOMIC);
b481de9c
ZY
4452 if (!rxb->skb) {
4453 if (net_ratelimit())
4454 printk(KERN_CRIT DRV_NAME
4455 ": Can not allocate SKB buffers\n");
4456 /* We don't reschedule replenish work here -- we will
4457 * call the restock method and if it still needs
4458 * more buffers it will schedule replenish */
4459 break;
4460 }
4461 priv->alloc_rxb_skb++;
4462 list_del(element);
6440adb5
BC
4463
4464 /* Get physical address of RB/SKB */
b481de9c
ZY
4465 rxb->dma_addr =
4466 pci_map_single(priv->pci_dev, rxb->skb->data,
9ee1ba47 4467 priv->hw_setting.rx_buf_size, PCI_DMA_FROMDEVICE);
b481de9c
ZY
4468 list_add_tail(&rxb->list, &rxq->rx_free);
4469 rxq->free_count++;
4470 }
4471 spin_unlock_irqrestore(&rxq->lock, flags);
5c0eef96
MA
4472}
4473
4474/*
4475 * this should be called while priv->lock is locked
4476*/
4fd1f841 4477static void __iwl4965_rx_replenish(void *data)
5c0eef96
MA
4478{
4479 struct iwl4965_priv *priv = data;
4480
4481 iwl4965_rx_allocate(priv);
4482 iwl4965_rx_queue_restock(priv);
4483}
4484
4485
4486void iwl4965_rx_replenish(void *data)
4487{
4488 struct iwl4965_priv *priv = data;
4489 unsigned long flags;
4490
4491 iwl4965_rx_allocate(priv);
b481de9c
ZY
4492
4493 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 4494 iwl4965_rx_queue_restock(priv);
b481de9c
ZY
4495 spin_unlock_irqrestore(&priv->lock, flags);
4496}
4497
4498/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
9fbab516 4499 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
b481de9c
ZY
4500 * This free routine walks the list of POOL entries and if SKB is set to
4501 * non NULL it is unmapped and freed
4502 */
bb8c093b 4503static void iwl4965_rx_queue_free(struct iwl4965_priv *priv, struct iwl4965_rx_queue *rxq)
b481de9c
ZY
4504{
4505 int i;
4506 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4507 if (rxq->pool[i].skb != NULL) {
4508 pci_unmap_single(priv->pci_dev,
4509 rxq->pool[i].dma_addr,
9ee1ba47
RR
4510 priv->hw_setting.rx_buf_size,
4511 PCI_DMA_FROMDEVICE);
b481de9c
ZY
4512 dev_kfree_skb(rxq->pool[i].skb);
4513 }
4514 }
4515
4516 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
4517 rxq->dma_addr);
4518 rxq->bd = NULL;
4519}
4520
bb8c093b 4521int iwl4965_rx_queue_alloc(struct iwl4965_priv *priv)
b481de9c 4522{
bb8c093b 4523 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c
ZY
4524 struct pci_dev *dev = priv->pci_dev;
4525 int i;
4526
4527 spin_lock_init(&rxq->lock);
4528 INIT_LIST_HEAD(&rxq->rx_free);
4529 INIT_LIST_HEAD(&rxq->rx_used);
6440adb5
BC
4530
4531 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
b481de9c
ZY
4532 rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
4533 if (!rxq->bd)
4534 return -ENOMEM;
6440adb5 4535
b481de9c
ZY
4536 /* Fill the rx_used queue with _all_ of the Rx buffers */
4537 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
4538 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
6440adb5 4539
b481de9c
ZY
4540 /* Set us so that we have processed and used all buffers, but have
4541 * not restocked the Rx queue with fresh buffers */
4542 rxq->read = rxq->write = 0;
4543 rxq->free_count = 0;
4544 rxq->need_update = 0;
4545 return 0;
4546}
4547
bb8c093b 4548void iwl4965_rx_queue_reset(struct iwl4965_priv *priv, struct iwl4965_rx_queue *rxq)
b481de9c
ZY
4549{
4550 unsigned long flags;
4551 int i;
4552 spin_lock_irqsave(&rxq->lock, flags);
4553 INIT_LIST_HEAD(&rxq->rx_free);
4554 INIT_LIST_HEAD(&rxq->rx_used);
4555 /* Fill the rx_used queue with _all_ of the Rx buffers */
4556 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
4557 /* In the reset function, these buffers may have been allocated
4558 * to an SKB, so we need to unmap and free potential storage */
4559 if (rxq->pool[i].skb != NULL) {
4560 pci_unmap_single(priv->pci_dev,
4561 rxq->pool[i].dma_addr,
9ee1ba47
RR
4562 priv->hw_setting.rx_buf_size,
4563 PCI_DMA_FROMDEVICE);
b481de9c
ZY
4564 priv->alloc_rxb_skb--;
4565 dev_kfree_skb(rxq->pool[i].skb);
4566 rxq->pool[i].skb = NULL;
4567 }
4568 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4569 }
4570
4571 /* Set us so that we have processed and used all buffers, but have
4572 * not restocked the Rx queue with fresh buffers */
4573 rxq->read = rxq->write = 0;
4574 rxq->free_count = 0;
4575 spin_unlock_irqrestore(&rxq->lock, flags);
4576}
4577
4578/* Convert linear signal-to-noise ratio into dB */
4579static u8 ratio2dB[100] = {
4580/* 0 1 2 3 4 5 6 7 8 9 */
4581 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
4582 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
4583 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
4584 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
4585 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
4586 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
4587 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
4588 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
4589 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
4590 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
4591};
4592
4593/* Calculates a relative dB value from a ratio of linear
4594 * (i.e. not dB) signal levels.
4595 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
bb8c093b 4596int iwl4965_calc_db_from_ratio(int sig_ratio)
b481de9c 4597{
c899a575
AB
4598 /* 1000:1 or higher just report as 60 dB */
4599 if (sig_ratio >= 1000)
b481de9c
ZY
4600 return 60;
4601
c899a575 4602 /* 100:1 or higher, divide by 10 and use table,
b481de9c 4603 * add 20 dB to make up for divide by 10 */
c899a575 4604 if (sig_ratio >= 100)
b481de9c
ZY
4605 return (20 + (int)ratio2dB[sig_ratio/10]);
4606
4607 /* We shouldn't see this */
4608 if (sig_ratio < 1)
4609 return 0;
4610
4611 /* Use table for ratios 1:1 - 99:1 */
4612 return (int)ratio2dB[sig_ratio];
4613}
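/*
 * Worked example (hypothetical ratio): sig_ratio = 250 falls in the
 * 100:1 - 999:1 range, so the function returns 20 + ratio2dB[25] =
 * 20 + 28 = 48 dB, close to the exact 20*log10(250) ~= 48 dB.
 */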
4614
4615#define PERFECT_RSSI (-20) /* dBm */
4616#define WORST_RSSI (-95) /* dBm */
4617#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
4618
4619/* Calculate an indication of rx signal quality (a percentage, not dBm!).
4620 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
4621 * about formulas used below. */
bb8c093b 4622int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm)
b481de9c
ZY
4623{
4624 int sig_qual;
4625 int degradation = PERFECT_RSSI - rssi_dbm;
4626
4627 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
4628 * as indicator; formula is (signal dbm - noise dbm).
4629 * SNR at or above 40 is a great signal (100%).
4630 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
4631 * Weakest usable signal is usually 10 - 15 dB SNR. */
4632 if (noise_dbm) {
4633 if (rssi_dbm - noise_dbm >= 40)
4634 return 100;
4635 else if (rssi_dbm < noise_dbm)
4636 return 0;
4637 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
4638
4639 /* Else use just the signal level.
4640 * This formula is a least squares fit of data points collected and
4641 * compared with a reference system that had a percentage (%) display
4642 * for signal quality. */
4643 } else
4644 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
4645 (15 * RSSI_RANGE + 62 * degradation)) /
4646 (RSSI_RANGE * RSSI_RANGE);
4647
4648 if (sig_qual > 100)
4649 sig_qual = 100;
4650 else if (sig_qual < 1)
4651 sig_qual = 0;
4652
4653 return sig_qual;
4654}
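/*
 * Worked example (hypothetical measurements): rssi_dbm = -60 with
 * noise_dbm = -85 gives an SNR of 25 dB, so the SNR branch reports
 * 25 * 5 / 2 = 62%.  With no noise measurement (noise_dbm = 0), the same
 * -60 dBm signal gives degradation = 40 and the least-squares formula
 * reports roughly 74%.
 */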
4655
4656/**
9fbab516 4657 * iwl4965_rx_handle - Main entry function for receiving responses from uCode
b481de9c
ZY
4658 *
4659 * Uses the priv->rx_handlers callback function array to invoke
4660 * the appropriate handlers, including command responses,
4661 * frame-received notifications, and other notifications.
4662 */
bb8c093b 4663static void iwl4965_rx_handle(struct iwl4965_priv *priv)
b481de9c 4664{
bb8c093b
CH
4665 struct iwl4965_rx_mem_buffer *rxb;
4666 struct iwl4965_rx_packet *pkt;
4667 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c
ZY
4668 u32 r, i;
4669 int reclaim;
4670 unsigned long flags;
5c0eef96
MA
4671 u8 fill_rx = 0;
4672 u32 count = 0;
b481de9c 4673
6440adb5
BC
4674 /* uCode's read index (stored in shared DRAM) indicates the last Rx
4675 * buffer that the driver may process (last buffer filled by ucode). */
bb8c093b 4676 r = iwl4965_hw_get_rx_read(priv);
b481de9c
ZY
4677 i = rxq->read;
4678
4679 /* Rx interrupt, but nothing sent from uCode */
4680 if (i == r)
4681 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i);
4682
5c0eef96
MA
4683 if (iwl4965_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
4684 fill_rx = 1;
4685
b481de9c
ZY
4686 while (i != r) {
4687 rxb = rxq->queue[i];
4688
9fbab516 4689 /* If an RXB doesn't have a Rx queue slot associated with it,
b481de9c
ZY
4690 * then a bug has been introduced in the queue refilling
4691 * routines -- catch it here */
4692 BUG_ON(rxb == NULL);
4693
4694 rxq->queue[i] = NULL;
4695
4696 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
9ee1ba47 4697 priv->hw_setting.rx_buf_size,
b481de9c 4698 PCI_DMA_FROMDEVICE);
bb8c093b 4699 pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
b481de9c
ZY
4700
4701 /* Reclaim a command buffer only if this packet is a response
4702 * to a (driver-originated) command.
4703 * If the packet (e.g. Rx frame) originated from uCode,
4704 * there is no command buffer to reclaim.
4705 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
4706 * but apparently a few don't get set; catch them here. */
4707 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
4708 (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
4709 (pkt->hdr.cmd != REPLY_4965_RX) &&
cfe01709 4710 (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
b481de9c
ZY
4711 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
4712 (pkt->hdr.cmd != REPLY_TX);
4713
4714 /* Based on type of command response or notification,
4715 * handle those that need handling via function in
bb8c093b 4716 * rx_handlers table. See iwl4965_setup_rx_handlers() */
b481de9c
ZY
4717 if (priv->rx_handlers[pkt->hdr.cmd]) {
4718 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4719 "r = %d, i = %d, %s, 0x%02x\n", r, i,
4720 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
4721 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
4722 } else {
4723 /* No handling needed */
4724 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4725 "r %d i %d No handler needed for %s, 0x%02x\n",
4726 r, i, get_cmd_string(pkt->hdr.cmd),
4727 pkt->hdr.cmd);
4728 }
4729
4730 if (reclaim) {
9fbab516
BC
4731 /* Invoke any callbacks, transfer the skb to caller, and
4732 * fire off the (possibly) blocking iwl4965_send_cmd()
b481de9c
ZY
4733 * as we reclaim the driver command queue */
4734 if (rxb && rxb->skb)
bb8c093b 4735 iwl4965_tx_cmd_complete(priv, rxb);
b481de9c
ZY
4736 else
4737 IWL_WARNING("Claim null rxb?\n");
4738 }
4739
4740 /* For now we just don't re-use anything. We can tweak this
4741 * later to try and re-use notification packets and SKBs that
4742 * fail to Rx correctly */
4743 if (rxb->skb != NULL) {
4744 priv->alloc_rxb_skb--;
4745 dev_kfree_skb_any(rxb->skb);
4746 rxb->skb = NULL;
4747 }
4748
4749 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
9ee1ba47
RR
4750 priv->hw_setting.rx_buf_size,
4751 PCI_DMA_FROMDEVICE);
b481de9c
ZY
4752 spin_lock_irqsave(&rxq->lock, flags);
4753 list_add_tail(&rxb->list, &priv->rxq.rx_used);
4754 spin_unlock_irqrestore(&rxq->lock, flags);
4755 i = (i + 1) & RX_QUEUE_MASK;
5c0eef96
MA
4756 /* If there are a lot of unused frames,
 4757 * restock the Rx queue so the uCode won't assert. */
4758 if (fill_rx) {
4759 count++;
4760 if (count >= 8) {
4761 priv->rxq.read = i;
4762 __iwl4965_rx_replenish(priv);
4763 count = 0;
4764 }
4765 }
b481de9c
ZY
4766 }
4767
4768 /* Backtrack one entry */
4769 priv->rxq.read = i;
bb8c093b 4770 iwl4965_rx_queue_restock(priv);
b481de9c
ZY
4771}
4772
6440adb5
BC
4773/**
4774 * iwl4965_tx_queue_update_write_ptr - Send new write index to hardware
4775 */
bb8c093b
CH
4776static int iwl4965_tx_queue_update_write_ptr(struct iwl4965_priv *priv,
4777 struct iwl4965_tx_queue *txq)
b481de9c
ZY
4778{
4779 u32 reg = 0;
4780 int rc = 0;
4781 int txq_id = txq->q.id;
4782
4783 if (txq->need_update == 0)
4784 return rc;
4785
4786 /* if we're trying to save power */
4787 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
4788 /* wake up nic if it's powered down ...
4789 * uCode will wake up, and interrupt us again, so next
4790 * time we'll skip this part. */
bb8c093b 4791 reg = iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
b481de9c
ZY
4792
4793 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
4794 IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
bb8c093b 4795 iwl4965_set_bit(priv, CSR_GP_CNTRL,
b481de9c
ZY
4796 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4797 return rc;
4798 }
4799
4800 /* restore this queue's parameters in nic hardware. */
bb8c093b 4801 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4802 if (rc)
4803 return rc;
bb8c093b 4804 iwl4965_write_direct32(priv, HBUS_TARG_WRPTR,
fc4b6853 4805 txq->q.write_ptr | (txq_id << 8));
bb8c093b 4806 iwl4965_release_nic_access(priv);
b481de9c
ZY
4807
4808 /* else not in power-save mode, uCode will never sleep when we're
4809 * trying to tx (during RFKILL, we're not trying to tx). */
4810 } else
bb8c093b 4811 iwl4965_write32(priv, HBUS_TARG_WRPTR,
fc4b6853 4812 txq->q.write_ptr | (txq_id << 8));
b481de9c
ZY
4813
4814 txq->need_update = 0;
4815
4816 return rc;
4817}
4818
c8b0e6e1 4819#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 4820static void iwl4965_print_rx_config_cmd(struct iwl4965_rxon_cmd *rxon)
b481de9c 4821{
0795af57
JP
4822 DECLARE_MAC_BUF(mac);
4823
b481de9c 4824 IWL_DEBUG_RADIO("RX CONFIG:\n");
bb8c093b 4825 iwl4965_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
b481de9c
ZY
4826 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
4827 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
4828 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
4829 le32_to_cpu(rxon->filter_flags));
4830 IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
4831 IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
4832 rxon->ofdm_basic_rates);
4833 IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
0795af57
JP
4834 IWL_DEBUG_RADIO("u8[6] node_addr: %s\n",
4835 print_mac(mac, rxon->node_addr));
4836 IWL_DEBUG_RADIO("u8[6] bssid_addr: %s\n",
4837 print_mac(mac, rxon->bssid_addr));
b481de9c
ZY
4838 IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
4839}
4840#endif
4841
bb8c093b 4842static void iwl4965_enable_interrupts(struct iwl4965_priv *priv)
b481de9c
ZY
4843{
4844 IWL_DEBUG_ISR("Enabling interrupts\n");
4845 set_bit(STATUS_INT_ENABLED, &priv->status);
bb8c093b 4846 iwl4965_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
b481de9c
ZY
4847}
4848
bb8c093b 4849static inline void iwl4965_disable_interrupts(struct iwl4965_priv *priv)
b481de9c
ZY
4850{
4851 clear_bit(STATUS_INT_ENABLED, &priv->status);
4852
4853 /* disable interrupts from uCode/NIC to host */
bb8c093b 4854 iwl4965_write32(priv, CSR_INT_MASK, 0x00000000);
b481de9c
ZY
4855
4856 /* acknowledge/clear/reset any interrupts still pending
4857 * from uCode or flow handler (Rx/Tx DMA) */
bb8c093b
CH
4858 iwl4965_write32(priv, CSR_INT, 0xffffffff);
4859 iwl4965_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
b481de9c
ZY
4860 IWL_DEBUG_ISR("Disabled interrupts\n");
4861}
4862
4863static const char *desc_lookup(int i)
4864{
4865 switch (i) {
4866 case 1:
4867 return "FAIL";
4868 case 2:
4869 return "BAD_PARAM";
4870 case 3:
4871 return "BAD_CHECKSUM";
4872 case 4:
4873 return "NMI_INTERRUPT";
4874 case 5:
4875 return "SYSASSERT";
4876 case 6:
4877 return "FATAL_ERROR";
4878 }
4879
4880 return "UNKNOWN";
4881}
4882
4883#define ERROR_START_OFFSET (1 * sizeof(u32))
4884#define ERROR_ELEM_SIZE (7 * sizeof(u32))
4885
bb8c093b 4886static void iwl4965_dump_nic_error_log(struct iwl4965_priv *priv)
b481de9c
ZY
4887{
4888 u32 data2, line;
4889 u32 desc, time, count, base, data1;
4890 u32 blink1, blink2, ilink1, ilink2;
4891 int rc;
4892
4893 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
4894
bb8c093b 4895 if (!iwl4965_hw_valid_rtc_data_addr(base)) {
b481de9c
ZY
4896 IWL_ERROR("Not valid error log pointer 0x%08X\n", base);
4897 return;
4898 }
4899
bb8c093b 4900 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4901 if (rc) {
4902 IWL_WARNING("Can not read from adapter at this time.\n");
4903 return;
4904 }
4905
bb8c093b 4906 count = iwl4965_read_targ_mem(priv, base);
b481de9c
ZY
4907
4908 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
4909 IWL_ERROR("Start IWL Error Log Dump:\n");
4910 IWL_ERROR("Status: 0x%08lX, Config: %08X count: %d\n",
4911 priv->status, priv->config, count);
4912 }
4913
bb8c093b
CH
4914 desc = iwl4965_read_targ_mem(priv, base + 1 * sizeof(u32));
4915 blink1 = iwl4965_read_targ_mem(priv, base + 3 * sizeof(u32));
4916 blink2 = iwl4965_read_targ_mem(priv, base + 4 * sizeof(u32));
4917 ilink1 = iwl4965_read_targ_mem(priv, base + 5 * sizeof(u32));
4918 ilink2 = iwl4965_read_targ_mem(priv, base + 6 * sizeof(u32));
4919 data1 = iwl4965_read_targ_mem(priv, base + 7 * sizeof(u32));
4920 data2 = iwl4965_read_targ_mem(priv, base + 8 * sizeof(u32));
4921 line = iwl4965_read_targ_mem(priv, base + 9 * sizeof(u32));
4922 time = iwl4965_read_targ_mem(priv, base + 11 * sizeof(u32));
b481de9c
ZY
4923
4924 IWL_ERROR("Desc Time "
4925 "data1 data2 line\n");
4926 IWL_ERROR("%-13s (#%d) %010u 0x%08X 0x%08X %u\n",
4927 desc_lookup(desc), desc, time, data1, data2, line);
4928 IWL_ERROR("blink1 blink2 ilink1 ilink2\n");
4929 IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
4930 ilink1, ilink2);
4931
bb8c093b 4932 iwl4965_release_nic_access(priv);
b481de9c
ZY
4933}
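
/* A minimal sketch (not from the original source) of the error-event-table
 * layout implied by the word offsets read in iwl4965_dump_nic_error_log()
 * above: word 0 is the entry count, and words 1..11 hold the fields that get
 * dumped.  Offsets are u32 words from error_event_table_ptr; words 2 and 10
 * are not read above and are left unnamed here. */
struct iwl4965_error_event_table_sketch {
	u32 count;	/* word 0: number of error entries */
	u32 desc;	/* word 1: error code, see desc_lookup() */
	u32 unused_2;	/* word 2: not dumped above */
	u32 blink1;	/* word 3 */
	u32 blink2;	/* word 4 */
	u32 ilink1;	/* word 5 */
	u32 ilink2;	/* word 6 */
	u32 data1;	/* word 7 */
	u32 data2;	/* word 8 */
	u32 line;	/* word 9: uCode source line */
	u32 unused_10;	/* word 10: not dumped above */
	u32 time;	/* word 11: timestamp */
};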
4934
4935#define EVENT_START_OFFSET (4 * sizeof(u32))
4936
4937/**
bb8c093b 4938 * iwl4965_print_event_log - Dump error event log to syslog
b481de9c 4939 *
bb8c093b 4940 * NOTE: Must be called with iwl4965_grab_nic_access() already obtained!
b481de9c 4941 */
bb8c093b 4942static void iwl4965_print_event_log(struct iwl4965_priv *priv, u32 start_idx,
b481de9c
ZY
4943 u32 num_events, u32 mode)
4944{
4945 u32 i;
4946 u32 base; /* SRAM byte address of event log header */
4947 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
4948 u32 ptr; /* SRAM byte address of log data */
4949 u32 ev, time, data; /* event log data */
4950
4951 if (num_events == 0)
4952 return;
4953
4954 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4955
4956 if (mode == 0)
4957 event_size = 2 * sizeof(u32);
4958 else
4959 event_size = 3 * sizeof(u32);
4960
4961 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
4962
4963 /* "time" is actually "data" for mode 0 (no timestamp).
4964 * place event id # at far right for easier visual parsing. */
4965 for (i = 0; i < num_events; i++) {
bb8c093b 4966 ev = iwl4965_read_targ_mem(priv, ptr);
b481de9c 4967 ptr += sizeof(u32);
bb8c093b 4968 time = iwl4965_read_targ_mem(priv, ptr);
b481de9c
ZY
4969 ptr += sizeof(u32);
4970 if (mode == 0)
4971 IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */
4972 else {
bb8c093b 4973 data = iwl4965_read_targ_mem(priv, ptr);
b481de9c
ZY
4974 ptr += sizeof(u32);
4975 IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev);
4976 }
4977 }
4978}
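
/* Illustrative sketch (not from the original source) of how one log entry is
 * laid out in SRAM, as implied by the reads above: mode 0 packs two words per
 * entry (event id, then data, read into the "time" variable), while mode 1
 * packs three (event id, timestamp, data). */
struct iwl4965_event_entry_sketch_mode0 {
	u32 ev;		/* event id, printed at far right */
	u32 data;	/* second word; holds data when no timestamp is kept */
};

struct iwl4965_event_entry_sketch_mode1 {
	u32 ev;		/* event id */
	u32 time;	/* timestamp */
	u32 data;	/* event-specific data */
};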
4979
bb8c093b 4980static void iwl4965_dump_nic_event_log(struct iwl4965_priv *priv)
b481de9c
ZY
4981{
4982 int rc;
4983 u32 base; /* SRAM byte address of event log header */
4984 u32 capacity; /* event log capacity in # entries */
4985 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
4986 u32 num_wraps; /* # times uCode wrapped to top of log */
4987 u32 next_entry; /* index of next entry to be written by uCode */
4988 u32 size; /* # entries that we'll print */
4989
4990 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
bb8c093b 4991 if (!iwl4965_hw_valid_rtc_data_addr(base)) {
b481de9c
ZY
4992 IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
4993 return;
4994 }
4995
bb8c093b 4996 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4997 if (rc) {
4998 IWL_WARNING("Can not read from adapter at this time.\n");
4999 return;
5000 }
5001
5002 /* event log header */
bb8c093b
CH
5003 capacity = iwl4965_read_targ_mem(priv, base);
5004 mode = iwl4965_read_targ_mem(priv, base + (1 * sizeof(u32)));
5005 num_wraps = iwl4965_read_targ_mem(priv, base + (2 * sizeof(u32)));
5006 next_entry = iwl4965_read_targ_mem(priv, base + (3 * sizeof(u32)));
b481de9c
ZY
5007
5008 size = num_wraps ? capacity : next_entry;
5009
5010 /* bail out if nothing in log */
5011 if (size == 0) {
583fab37 5012 IWL_ERROR("Start IWL Event Log Dump: nothing in log\n");
bb8c093b 5013 iwl4965_release_nic_access(priv);
b481de9c
ZY
5014 return;
5015 }
5016
583fab37 5017 IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n",
b481de9c
ZY
5018 size, num_wraps);
5019
5020 /* if uCode has wrapped back to top of log, start at the oldest entry,
5021	 * i.e. the next one that uCode would fill. */
5022 if (num_wraps)
bb8c093b 5023 iwl4965_print_event_log(priv, next_entry,
b481de9c
ZY
5024 capacity - next_entry, mode);
5025
5026 /* (then/else) start at top of log */
bb8c093b 5027 iwl4965_print_event_log(priv, 0, next_entry, mode);
b481de9c 5028
bb8c093b 5029 iwl4965_release_nic_access(priv);
b481de9c
ZY
5030}
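
/* Minimal standalone sketch (not from the original source) of the wrap-aware
 * ordering used above: once the log has wrapped, the oldest entry is the one
 * uCode would overwrite next, so dump [next_entry, capacity) first and then
 * [0, next_entry).  The (start, count) pairs here mirror what
 * iwl4965_print_event_log() is called with. */
static void event_log_order_sketch(u32 capacity, u32 num_wraps, u32 next_entry,
				   u32 *first_start, u32 *first_count,
				   u32 *second_count)
{
	if (num_wraps) {
		*first_start = next_entry;		/* oldest entry */
		*first_count = capacity - next_entry;
	} else {
		*first_start = 0;
		*first_count = 0;			/* nothing wrapped yet */
	}
	*second_count = next_entry;			/* newest entries */
}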
5031
5032/**
bb8c093b 5033 * iwl4965_irq_handle_error - called for HW or SW error interrupt from card
b481de9c 5034 */
bb8c093b 5035static void iwl4965_irq_handle_error(struct iwl4965_priv *priv)
b481de9c 5036{
bb8c093b 5037 /* Set the FW error flag -- cleared on iwl4965_down */
b481de9c
ZY
5038 set_bit(STATUS_FW_ERROR, &priv->status);
5039
5040 /* Cancel currently queued command. */
5041 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
5042
c8b0e6e1 5043#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
5044 if (iwl4965_debug_level & IWL_DL_FW_ERRORS) {
5045 iwl4965_dump_nic_error_log(priv);
5046 iwl4965_dump_nic_event_log(priv);
5047 iwl4965_print_rx_config_cmd(&priv->staging_rxon);
b481de9c
ZY
5048 }
5049#endif
5050
5051 wake_up_interruptible(&priv->wait_command_queue);
5052
5053 /* Keep the restart process from trying to send host
5054	 * commands by clearing the READY status bit */
5055 clear_bit(STATUS_READY, &priv->status);
5056
5057 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
5058 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS,
5059 "Restarting adapter due to uCode error.\n");
5060
bb8c093b 5061 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
5062 memcpy(&priv->recovery_rxon, &priv->active_rxon,
5063 sizeof(priv->recovery_rxon));
5064 priv->error_recovering = 1;
5065 }
5066 queue_work(priv->workqueue, &priv->restart);
5067 }
5068}
5069
bb8c093b 5070static void iwl4965_error_recovery(struct iwl4965_priv *priv)
b481de9c
ZY
5071{
5072 unsigned long flags;
5073
5074 memcpy(&priv->staging_rxon, &priv->recovery_rxon,
5075 sizeof(priv->staging_rxon));
5076 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 5077 iwl4965_commit_rxon(priv);
b481de9c 5078
bb8c093b 5079 iwl4965_rxon_add_station(priv, priv->bssid, 1);
b481de9c
ZY
5080
5081 spin_lock_irqsave(&priv->lock, flags);
5082 priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id);
5083 priv->error_recovering = 0;
5084 spin_unlock_irqrestore(&priv->lock, flags);
5085}
5086
bb8c093b 5087static void iwl4965_irq_tasklet(struct iwl4965_priv *priv)
b481de9c
ZY
5088{
5089 u32 inta, handled = 0;
5090 u32 inta_fh;
5091 unsigned long flags;
c8b0e6e1 5092#ifdef CONFIG_IWL4965_DEBUG
b481de9c
ZY
5093 u32 inta_mask;
5094#endif
5095
5096 spin_lock_irqsave(&priv->lock, flags);
5097
5098 /* Ack/clear/reset pending uCode interrupts.
5099 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
5100 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
bb8c093b
CH
5101 inta = iwl4965_read32(priv, CSR_INT);
5102 iwl4965_write32(priv, CSR_INT, inta);
b481de9c
ZY
5103
5104 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
5105 * Any new interrupts that happen after this, either while we're
5106 * in this tasklet, or later, will show up in next ISR/tasklet. */
bb8c093b
CH
5107 inta_fh = iwl4965_read32(priv, CSR_FH_INT_STATUS);
5108 iwl4965_write32(priv, CSR_FH_INT_STATUS, inta_fh);
b481de9c 5109
c8b0e6e1 5110#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 5111 if (iwl4965_debug_level & IWL_DL_ISR) {
9fbab516
BC
5112 /* just for debug */
5113 inta_mask = iwl4965_read32(priv, CSR_INT_MASK);
b481de9c
ZY
5114 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
5115 inta, inta_mask, inta_fh);
5116 }
5117#endif
5118
5119 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
5120 * atomic, make sure that inta covers all the interrupts that
5121 * we've discovered, even if FH interrupt came in just after
5122 * reading CSR_INT. */
5123 if (inta_fh & CSR_FH_INT_RX_MASK)
5124 inta |= CSR_INT_BIT_FH_RX;
5125 if (inta_fh & CSR_FH_INT_TX_MASK)
5126 inta |= CSR_INT_BIT_FH_TX;
5127
5128 /* Now service all interrupt bits discovered above. */
5129 if (inta & CSR_INT_BIT_HW_ERR) {
5130 IWL_ERROR("Microcode HW error detected. Restarting.\n");
5131
5132 /* Tell the device to stop sending interrupts */
bb8c093b 5133 iwl4965_disable_interrupts(priv);
b481de9c 5134
bb8c093b 5135 iwl4965_irq_handle_error(priv);
b481de9c
ZY
5136
5137 handled |= CSR_INT_BIT_HW_ERR;
5138
5139 spin_unlock_irqrestore(&priv->lock, flags);
5140
5141 return;
5142 }
5143
c8b0e6e1 5144#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 5145 if (iwl4965_debug_level & (IWL_DL_ISR)) {
b481de9c 5146 /* NIC fires this, but we don't use it, redundant with WAKEUP */
25c03d8e
JP
5147 if (inta & CSR_INT_BIT_SCD)
5148 IWL_DEBUG_ISR("Scheduler finished to transmit "
5149 "the frame/frames.\n");
b481de9c
ZY
5150
5151 /* Alive notification via Rx interrupt will do the real work */
5152 if (inta & CSR_INT_BIT_ALIVE)
5153 IWL_DEBUG_ISR("Alive interrupt\n");
5154 }
5155#endif
5156 /* Safely ignore these bits for debug checks below */
25c03d8e 5157 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
b481de9c 5158
9fbab516 5159 /* HW RF KILL switch toggled */
b481de9c
ZY
5160 if (inta & CSR_INT_BIT_RF_KILL) {
5161 int hw_rf_kill = 0;
bb8c093b 5162 if (!(iwl4965_read32(priv, CSR_GP_CNTRL) &
b481de9c
ZY
5163 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
5164 hw_rf_kill = 1;
5165
5166 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL | IWL_DL_ISR,
5167 "RF_KILL bit toggled to %s.\n",
5168 hw_rf_kill ? "disable radio":"enable radio");
5169
5170 /* Queue restart only if RF_KILL switch was set to "kill"
5171 * when we loaded driver, and is now set to "enable".
5172 * After we're Alive, RF_KILL gets handled by
5173 * iwl_rx_card_state_notif() */
53e49093
ZY
5174 if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status)) {
5175 clear_bit(STATUS_RF_KILL_HW, &priv->status);
b481de9c 5176 queue_work(priv->workqueue, &priv->restart);
53e49093 5177 }
b481de9c
ZY
5178
5179 handled |= CSR_INT_BIT_RF_KILL;
5180 }
5181
9fbab516 5182 /* Chip got too hot and stopped itself */
b481de9c
ZY
5183 if (inta & CSR_INT_BIT_CT_KILL) {
5184 IWL_ERROR("Microcode CT kill error detected.\n");
5185 handled |= CSR_INT_BIT_CT_KILL;
5186 }
5187
5188 /* Error detected by uCode */
5189 if (inta & CSR_INT_BIT_SW_ERR) {
5190 IWL_ERROR("Microcode SW error detected. Restarting 0x%X.\n",
5191 inta);
bb8c093b 5192 iwl4965_irq_handle_error(priv);
b481de9c
ZY
5193 handled |= CSR_INT_BIT_SW_ERR;
5194 }
5195
5196 /* uCode wakes up after power-down sleep */
5197 if (inta & CSR_INT_BIT_WAKEUP) {
5198 IWL_DEBUG_ISR("Wakeup interrupt\n");
bb8c093b
CH
5199 iwl4965_rx_queue_update_write_ptr(priv, &priv->rxq);
5200 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[0]);
5201 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[1]);
5202 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[2]);
5203 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[3]);
5204 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[4]);
5205 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[5]);
b481de9c
ZY
5206
5207 handled |= CSR_INT_BIT_WAKEUP;
5208 }
5209
5210 /* All uCode command responses, including Tx command responses,
5211 * Rx "responses" (frame-received notification), and other
5212	 * notifications from uCode come through here */
5213 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
bb8c093b 5214 iwl4965_rx_handle(priv);
b481de9c
ZY
5215 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
5216 }
5217
5218 if (inta & CSR_INT_BIT_FH_TX) {
5219 IWL_DEBUG_ISR("Tx interrupt\n");
5220 handled |= CSR_INT_BIT_FH_TX;
5221 }
5222
5223 if (inta & ~handled)
5224 IWL_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
5225
5226 if (inta & ~CSR_INI_SET_MASK) {
5227 IWL_WARNING("Disabled INTA bits 0x%08x were pending\n",
5228 inta & ~CSR_INI_SET_MASK);
5229 IWL_WARNING(" with FH_INT = 0x%08x\n", inta_fh);
5230 }
5231
5232 /* Re-enable all interrupts */
bb8c093b 5233 iwl4965_enable_interrupts(priv);
b481de9c 5234
c8b0e6e1 5235#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
5236 if (iwl4965_debug_level & (IWL_DL_ISR)) {
5237 inta = iwl4965_read32(priv, CSR_INT);
5238 inta_mask = iwl4965_read32(priv, CSR_INT_MASK);
5239 inta_fh = iwl4965_read32(priv, CSR_FH_INT_STATUS);
b481de9c
ZY
5240 IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
5241 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
5242 }
5243#endif
5244 spin_unlock_irqrestore(&priv->lock, flags);
5245}
5246
bb8c093b 5247static irqreturn_t iwl4965_isr(int irq, void *data)
b481de9c 5248{
bb8c093b 5249 struct iwl4965_priv *priv = data;
b481de9c
ZY
5250 u32 inta, inta_mask;
5251 u32 inta_fh;
5252 if (!priv)
5253 return IRQ_NONE;
5254
5255 spin_lock(&priv->lock);
5256
5257 /* Disable (but don't clear!) interrupts here to avoid
5258 * back-to-back ISRs and sporadic interrupts from our NIC.
5259 * If we have something to service, the tasklet will re-enable ints.
5260 * If we *don't* have something, we'll re-enable before leaving here. */
bb8c093b
CH
5261 inta_mask = iwl4965_read32(priv, CSR_INT_MASK); /* just for debug */
5262 iwl4965_write32(priv, CSR_INT_MASK, 0x00000000);
b481de9c
ZY
5263
5264 /* Discover which interrupts are active/pending */
bb8c093b
CH
5265 inta = iwl4965_read32(priv, CSR_INT);
5266 inta_fh = iwl4965_read32(priv, CSR_FH_INT_STATUS);
b481de9c
ZY
5267
5268 /* Ignore interrupt if there's nothing in NIC to service.
5269 * This may be due to IRQ shared with another device,
5270 * or due to sporadic interrupts thrown from our NIC. */
5271 if (!inta && !inta_fh) {
5272 IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
5273 goto none;
5274 }
5275
5276 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
66fbb541
ON
5277 /* Hardware disappeared. It might have already raised
5278 * an interrupt */
b481de9c 5279 IWL_WARNING("HARDWARE GONE?? INTA == 0x%080x\n", inta);
66fbb541 5280 goto unplugged;
b481de9c
ZY
5281 }
5282
5283 IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
5284 inta, inta_mask, inta_fh);
5285
25c03d8e
JP
5286 inta &= ~CSR_INT_BIT_SCD;
5287
bb8c093b 5288 /* iwl4965_irq_tasklet() will service interrupts and re-enable them */
25c03d8e
JP
5289 if (likely(inta || inta_fh))
5290 tasklet_schedule(&priv->irq_tasklet);
b481de9c 5291
66fbb541
ON
5292 unplugged:
5293 spin_unlock(&priv->lock);
b481de9c
ZY
5294 return IRQ_HANDLED;
5295
5296 none:
5297 /* re-enable interrupts here since we don't have anything to service. */
bb8c093b 5298 iwl4965_enable_interrupts(priv);
b481de9c
ZY
5299 spin_unlock(&priv->lock);
5300 return IRQ_NONE;
5301}
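
/* Minimal sketch (not from the original source) of the cause-folding step at
 * the top of iwl4965_irq_tasklet(): because CSR_INT and CSR_FH_INT_STATUS are
 * read and cleared separately, any flow-handler Rx/Tx cause is folded into
 * the main cause word so a late-arriving FH interrupt is not lost.  The mask
 * and bit names mirror the ones used above. */
static u32 fold_fh_causes_sketch(u32 inta, u32 inta_fh)
{
	if (inta_fh & CSR_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;
	return inta;
}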
5302
5303/************************** EEPROM BANDS ****************************
5304 *
bb8c093b 5305 * The iwl4965_eeprom_band definitions below provide the mapping from the
b481de9c
ZY
5306 * EEPROM contents to the specific channel number supported for each
5307 * band.
5308 *
bb8c093b 5309 * For example, iwl4965_priv->eeprom.band_3_channels[4] from the band_3
b481de9c
ZY
5310 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
5311 * The specific geography and calibration information for that channel
5312 * is contained in the eeprom map itself.
5313 *
5314 * During init, we copy the eeprom information and channel map
5315 * information into priv->channel_info_24/52 and priv->channel_map_24/52
5316 *
5317 * channel_map_24/52 provides the index in the channel_info array for a
5318 * given channel. We have to have two separate maps as there is channel
5319 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
5320 * band_2
5321 *
5322 * A value of 0xff stored in the channel_map indicates that the channel
5323 * is not supported by the hardware at all.
5324 *
5325 * A value of 0xfe in the channel_map indicates that the channel is not
5326 * valid for Tx with the current hardware. This means that
5327 * while the system can tune and receive on a given channel, it may not
5328 * be able to associate or transmit any frames on that
5329 * channel. There is no corresponding channel information for that
5330 * entry.
5331 *
5332 *********************************************************************/
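
/* Illustrative sketch (not from the original source) of the channel_map
 * sentinel values documented in the comment block above.  The helper name and
 * parameters are hypothetical; 0xff and 0xfe are the values described there. */
static int channel_map_entry_usable_sketch(u8 map_val, int for_tx)
{
	if (map_val == 0xff)		/* not supported by the hardware at all */
		return 0;
	if (map_val == 0xfe)		/* tune/receive only: no Tx, no association */
		return !for_tx;
	return 1;			/* valid index into channel_info[] */
}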
5333
5334/* 2.4 GHz */
bb8c093b 5335static const u8 iwl4965_eeprom_band_1[14] = {
b481de9c
ZY
5336 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
5337};
5338
5339/* 5.2 GHz bands */
9fbab516 5340static const u8 iwl4965_eeprom_band_2[] = { /* 4915-5080MHz */
b481de9c
ZY
5341 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
5342};
5343
9fbab516 5344static const u8 iwl4965_eeprom_band_3[] = { /* 5170-5320MHz */
b481de9c
ZY
5345 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
5346};
5347
bb8c093b 5348static const u8 iwl4965_eeprom_band_4[] = { /* 5500-5700MHz */
b481de9c
ZY
5349 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
5350};
5351
bb8c093b 5352static const u8 iwl4965_eeprom_band_5[] = { /* 5725-5825MHz */
b481de9c
ZY
5353 145, 149, 153, 157, 161, 165
5354};
5355
bb8c093b 5356static u8 iwl4965_eeprom_band_6[] = { /* 2.4 FAT channel */
b481de9c
ZY
5357 1, 2, 3, 4, 5, 6, 7
5358};
5359
bb8c093b 5360static u8 iwl4965_eeprom_band_7[] = { /* 5.2 FAT channel */
b481de9c
ZY
5361 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
5362};
5363
9fbab516
BC
5364static void iwl4965_init_band_reference(const struct iwl4965_priv *priv,
5365 int band,
b481de9c 5366 int *eeprom_ch_count,
bb8c093b 5367 const struct iwl4965_eeprom_channel
b481de9c
ZY
5368 **eeprom_ch_info,
5369 const u8 **eeprom_ch_index)
5370{
5371 switch (band) {
5372 case 1: /* 2.4GHz band */
bb8c093b 5373 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_1);
b481de9c 5374 *eeprom_ch_info = priv->eeprom.band_1_channels;
bb8c093b 5375 *eeprom_ch_index = iwl4965_eeprom_band_1;
b481de9c 5376 break;
9fbab516 5377 case 2: /* 4.9GHz band */
bb8c093b 5378 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_2);
b481de9c 5379 *eeprom_ch_info = priv->eeprom.band_2_channels;
bb8c093b 5380 *eeprom_ch_index = iwl4965_eeprom_band_2;
b481de9c
ZY
5381 break;
5382 case 3: /* 5.2GHz band */
bb8c093b 5383 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_3);
b481de9c 5384 *eeprom_ch_info = priv->eeprom.band_3_channels;
bb8c093b 5385 *eeprom_ch_index = iwl4965_eeprom_band_3;
b481de9c 5386 break;
9fbab516 5387 case 4: /* 5.5GHz band */
bb8c093b 5388 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_4);
b481de9c 5389 *eeprom_ch_info = priv->eeprom.band_4_channels;
bb8c093b 5390 *eeprom_ch_index = iwl4965_eeprom_band_4;
b481de9c 5391 break;
9fbab516 5392 case 5: /* 5.7GHz band */
bb8c093b 5393 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_5);
b481de9c 5394 *eeprom_ch_info = priv->eeprom.band_5_channels;
bb8c093b 5395 *eeprom_ch_index = iwl4965_eeprom_band_5;
b481de9c 5396 break;
9fbab516 5397 case 6: /* 2.4GHz FAT channels */
bb8c093b 5398 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_6);
b481de9c 5399 *eeprom_ch_info = priv->eeprom.band_24_channels;
bb8c093b 5400 *eeprom_ch_index = iwl4965_eeprom_band_6;
b481de9c 5401 break;
9fbab516 5402 case 7: /* 5 GHz FAT channels */
bb8c093b 5403 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_7);
b481de9c 5404 *eeprom_ch_info = priv->eeprom.band_52_channels;
bb8c093b 5405 *eeprom_ch_index = iwl4965_eeprom_band_7;
b481de9c
ZY
5406 break;
5407 default:
5408 BUG();
5409 return;
5410 }
5411}
5412
6440adb5
BC
5413/**
5414 * iwl4965_get_channel_info - Find driver's private channel info
5415 *
5416 * Based on band and channel number.
5417 */
bb8c093b 5418const struct iwl4965_channel_info *iwl4965_get_channel_info(const struct iwl4965_priv *priv,
b481de9c
ZY
5419 int phymode, u16 channel)
5420{
5421 int i;
5422
5423 switch (phymode) {
5424 case MODE_IEEE80211A:
5425 for (i = 14; i < priv->channel_count; i++) {
5426 if (priv->channel_info[i].channel == channel)
5427 return &priv->channel_info[i];
5428 }
5429 break;
5430
5431 case MODE_IEEE80211B:
5432 case MODE_IEEE80211G:
5433 if (channel >= 1 && channel <= 14)
5434 return &priv->channel_info[channel - 1];
5435 break;
5436
5437 }
5438
5439 return NULL;
5440}
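
/* Usage sketch (not from the original source): looking up the driver's info
 * for 5.2 GHz channel 36 versus 2.4 GHz channel 6.  A NULL return means the
 * channel is not in the EEPROM-derived map built by
 * iwl4965_init_channel_map(); the wrapper name is hypothetical. */
static int channel_lookup_example_sketch(struct iwl4965_priv *priv)
{
	const struct iwl4965_channel_info *ch;

	ch = iwl4965_get_channel_info(priv, MODE_IEEE80211A, 36);
	if (!ch)
		return -1;	/* 802.11a channel 36 unknown to this SKU */

	ch = iwl4965_get_channel_info(priv, MODE_IEEE80211G, 6);
	return ch ? 0 : -1;	/* 2.4 GHz channels index directly: ch 6 -> [5] */
}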
5441
5442#define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
5443 ? # x " " : "")
5444
6440adb5
BC
5445/**
5446 * iwl4965_init_channel_map - Set up driver's info for all possible channels
5447 */
bb8c093b 5448static int iwl4965_init_channel_map(struct iwl4965_priv *priv)
b481de9c
ZY
5449{
5450 int eeprom_ch_count = 0;
5451 const u8 *eeprom_ch_index = NULL;
bb8c093b 5452 const struct iwl4965_eeprom_channel *eeprom_ch_info = NULL;
b481de9c 5453 int band, ch;
bb8c093b 5454 struct iwl4965_channel_info *ch_info;
b481de9c
ZY
5455
5456 if (priv->channel_count) {
5457 IWL_DEBUG_INFO("Channel map already initialized.\n");
5458 return 0;
5459 }
5460
5461 if (priv->eeprom.version < 0x2f) {
5462 IWL_WARNING("Unsupported EEPROM version: 0x%04X\n",
5463 priv->eeprom.version);
5464 return -EINVAL;
5465 }
5466
5467 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");
5468
5469 priv->channel_count =
bb8c093b
CH
5470 ARRAY_SIZE(iwl4965_eeprom_band_1) +
5471 ARRAY_SIZE(iwl4965_eeprom_band_2) +
5472 ARRAY_SIZE(iwl4965_eeprom_band_3) +
5473 ARRAY_SIZE(iwl4965_eeprom_band_4) +
5474 ARRAY_SIZE(iwl4965_eeprom_band_5);
b481de9c
ZY
5475
5476 IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count);
5477
bb8c093b 5478 priv->channel_info = kzalloc(sizeof(struct iwl4965_channel_info) *
b481de9c
ZY
5479 priv->channel_count, GFP_KERNEL);
5480 if (!priv->channel_info) {
5481 IWL_ERROR("Could not allocate channel_info\n");
5482 priv->channel_count = 0;
5483 return -ENOMEM;
5484 }
5485
5486 ch_info = priv->channel_info;
5487
5488 /* Loop through the 5 EEPROM bands adding them in order to the
5489	 * channel map we maintain (that contains additional information beyond
5490	 * what is in the EEPROM) */
5491 for (band = 1; band <= 5; band++) {
5492
bb8c093b 5493 iwl4965_init_band_reference(priv, band, &eeprom_ch_count,
b481de9c
ZY
5494 &eeprom_ch_info, &eeprom_ch_index);
5495
5496 /* Loop through each band adding each of the channels */
5497 for (ch = 0; ch < eeprom_ch_count; ch++) {
5498 ch_info->channel = eeprom_ch_index[ch];
5499 ch_info->phymode = (band == 1) ? MODE_IEEE80211B :
5500 MODE_IEEE80211A;
5501
5502 /* permanently store EEPROM's channel regulatory flags
5503 * and max power in channel info database. */
5504 ch_info->eeprom = eeprom_ch_info[ch];
5505
5506 /* Copy the run-time flags so they are there even on
5507 * invalid channels */
5508 ch_info->flags = eeprom_ch_info[ch].flags;
5509
5510 if (!(is_channel_valid(ch_info))) {
5511 IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - "
5512 "No traffic\n",
5513 ch_info->channel,
5514 ch_info->flags,
5515 is_channel_a_band(ch_info) ?
5516 "5.2" : "2.4");
5517 ch_info++;
5518 continue;
5519 }
5520
5521 /* Initialize regulatory-based run-time data */
5522 ch_info->max_power_avg = ch_info->curr_txpow =
5523 eeprom_ch_info[ch].max_power_avg;
5524 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
5525 ch_info->min_power = 0;
5526
5527 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
5528 " %ddBm): Ad-Hoc %ssupported\n",
5529 ch_info->channel,
5530 is_channel_a_band(ch_info) ?
5531 "5.2" : "2.4",
5532 CHECK_AND_PRINT(IBSS),
5533 CHECK_AND_PRINT(ACTIVE),
5534 CHECK_AND_PRINT(RADAR),
5535 CHECK_AND_PRINT(WIDE),
5536 CHECK_AND_PRINT(NARROW),
5537 CHECK_AND_PRINT(DFS),
5538 eeprom_ch_info[ch].flags,
5539 eeprom_ch_info[ch].max_power_avg,
5540 ((eeprom_ch_info[ch].
5541 flags & EEPROM_CHANNEL_IBSS)
5542 && !(eeprom_ch_info[ch].
5543 flags & EEPROM_CHANNEL_RADAR))
5544 ? "" : "not ");
5545
5546 /* Set the user_txpower_limit to the highest power
5547 * supported by any channel */
5548 if (eeprom_ch_info[ch].max_power_avg >
5549 priv->user_txpower_limit)
5550 priv->user_txpower_limit =
5551 eeprom_ch_info[ch].max_power_avg;
5552
5553 ch_info++;
5554 }
5555 }
5556
6440adb5 5557 /* Two additional EEPROM bands for 2.4 and 5 GHz FAT channels */
b481de9c
ZY
5558 for (band = 6; band <= 7; band++) {
5559 int phymode;
5560 u8 fat_extension_chan;
5561
bb8c093b 5562 iwl4965_init_band_reference(priv, band, &eeprom_ch_count,
b481de9c
ZY
5563 &eeprom_ch_info, &eeprom_ch_index);
5564
6440adb5 5565 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
b481de9c 5566 phymode = (band == 6) ? MODE_IEEE80211B : MODE_IEEE80211A;
6440adb5 5567
b481de9c
ZY
5568 /* Loop through each band adding each of the channels */
5569 for (ch = 0; ch < eeprom_ch_count; ch++) {
5570
5571 if ((band == 6) &&
5572 ((eeprom_ch_index[ch] == 5) ||
5573 (eeprom_ch_index[ch] == 6) ||
5574 (eeprom_ch_index[ch] == 7)))
5575 fat_extension_chan = HT_IE_EXT_CHANNEL_MAX;
5576 else
5577 fat_extension_chan = HT_IE_EXT_CHANNEL_ABOVE;
5578
6440adb5 5579 /* Set up driver's info for lower half */
b481de9c
ZY
5580 iwl4965_set_fat_chan_info(priv, phymode,
5581 eeprom_ch_index[ch],
5582 &(eeprom_ch_info[ch]),
5583 fat_extension_chan);
5584
6440adb5 5585 /* Set up driver's info for upper half */
b481de9c
ZY
5586 iwl4965_set_fat_chan_info(priv, phymode,
5587 (eeprom_ch_index[ch] + 4),
5588 &(eeprom_ch_info[ch]),
5589 HT_IE_EXT_CHANNEL_BELOW);
5590 }
5591 }
5592
5593 return 0;
5594}
5595
849e0dce
RC
5596/*
5597 * iwl4965_free_channel_map - undo allocations in iwl4965_init_channel_map
5598 */
5599static void iwl4965_free_channel_map(struct iwl4965_priv *priv)
5600{
5601 kfree(priv->channel_info);
5602 priv->channel_count = 0;
5603}
5604
b481de9c
ZY
5605/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
5606 * sending probe req. This should be set long enough to hear probe responses
5607 * from more than one AP. */
5608#define IWL_ACTIVE_DWELL_TIME_24 (20) /* all times in msec */
5609#define IWL_ACTIVE_DWELL_TIME_52 (10)
5610
5611/* For faster active scanning, scan will move to the next channel if fewer than
5612 * PLCP_QUIET_THRESH packets are heard on this channel within
5613 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
5614 * time if it's a quiet channel (nothing responded to our probe, and there's
5615 * no other traffic).
5616 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
5617#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */
5618#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(5) /* msec */
5619
5620/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
5621 * Must be set longer than active dwell time.
5622 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
5623#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
5624#define IWL_PASSIVE_DWELL_TIME_52 (10)
5625#define IWL_PASSIVE_DWELL_BASE (100)
5626#define IWL_CHANNEL_TUNE_TIME 5
5627
bb8c093b 5628static inline u16 iwl4965_get_active_dwell_time(struct iwl4965_priv *priv, int phymode)
b481de9c
ZY
5629{
5630 if (phymode == MODE_IEEE80211A)
5631 return IWL_ACTIVE_DWELL_TIME_52;
5632 else
5633 return IWL_ACTIVE_DWELL_TIME_24;
5634}
5635
bb8c093b 5636static u16 iwl4965_get_passive_dwell_time(struct iwl4965_priv *priv, int phymode)
b481de9c 5637{
bb8c093b 5638 u16 active = iwl4965_get_active_dwell_time(priv, phymode);
b481de9c
ZY
5639 u16 passive = (phymode != MODE_IEEE80211A) ?
5640 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
5641 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
5642
bb8c093b 5643 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
5644 /* If we're associated, we clamp the maximum passive
5645 * dwell time to be 98% of the beacon interval (minus
5646 * 2 * channel tune time) */
5647 passive = priv->beacon_int;
5648 if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
5649 passive = IWL_PASSIVE_DWELL_BASE;
5650 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
5651 }
5652
5653 if (passive <= active)
5654 passive = active + 1;
5655
5656 return passive;
5657}
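
/*
 * Worked example (not from the original source) of the clamping above for a
 * 5.2 GHz scan:
 *   unassociated default: IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52
 *     = 100 + 10 = 110 msec
 *   associated, beacon_int of 0, 100, or anything above 100 msec:
 *     clamped to IWL_PASSIVE_DWELL_BASE = 100, then trimmed to 98% minus two
 *     channel-tune times: 100 * 98 / 100 - 2 * 5 = 88 msec
 *   88 exceeds the active dwell (10 msec), so the final check returns 88.
 */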
5658
bb8c093b 5659static int iwl4965_get_channels_for_scan(struct iwl4965_priv *priv, int phymode,
b481de9c 5660 u8 is_active, u8 direct_mask,
bb8c093b 5661 struct iwl4965_scan_channel *scan_ch)
b481de9c
ZY
5662{
5663 const struct ieee80211_channel *channels = NULL;
5664 const struct ieee80211_hw_mode *hw_mode;
bb8c093b 5665 const struct iwl4965_channel_info *ch_info;
b481de9c
ZY
5666 u16 passive_dwell = 0;
5667 u16 active_dwell = 0;
5668 int added, i;
5669
bb8c093b 5670 hw_mode = iwl4965_get_hw_mode(priv, phymode);
b481de9c
ZY
5671 if (!hw_mode)
5672 return 0;
5673
5674 channels = hw_mode->channels;
5675
bb8c093b
CH
5676 active_dwell = iwl4965_get_active_dwell_time(priv, phymode);
5677 passive_dwell = iwl4965_get_passive_dwell_time(priv, phymode);
b481de9c
ZY
5678
5679 for (i = 0, added = 0; i < hw_mode->num_channels; i++) {
5680 if (channels[i].chan ==
5681 le16_to_cpu(priv->active_rxon.channel)) {
bb8c093b 5682 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
5683 IWL_DEBUG_SCAN
5684 ("Skipping current channel %d\n",
5685 le16_to_cpu(priv->active_rxon.channel));
5686 continue;
5687 }
5688 } else if (priv->only_active_channel)
5689 continue;
5690
5691 scan_ch->channel = channels[i].chan;
5692
9fbab516
BC
5693 ch_info = iwl4965_get_channel_info(priv, phymode,
5694 scan_ch->channel);
b481de9c
ZY
5695 if (!is_channel_valid(ch_info)) {
5696 IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n",
5697 scan_ch->channel);
5698 continue;
5699 }
5700
5701 if (!is_active || is_channel_passive(ch_info) ||
5702 !(channels[i].flag & IEEE80211_CHAN_W_ACTIVE_SCAN))
5703 scan_ch->type = 0; /* passive */
5704 else
5705 scan_ch->type = 1; /* active */
5706
5707 if (scan_ch->type & 1)
5708 scan_ch->type |= (direct_mask << 1);
5709
5710 if (is_channel_narrow(ch_info))
5711 scan_ch->type |= (1 << 7);
5712
5713 scan_ch->active_dwell = cpu_to_le16(active_dwell);
5714 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
5715
9fbab516 5716 /* Set txpower levels to defaults */
b481de9c
ZY
5717 scan_ch->tpc.dsp_atten = 110;
5718 /* scan_pwr_info->tpc.dsp_atten; */
5719
5720 /*scan_pwr_info->tpc.tx_gain; */
5721 if (phymode == MODE_IEEE80211A)
5722 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
5723 else {
5724 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
5725 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
9fbab516 5726 * power level:
8a1b0245 5727 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
b481de9c
ZY
5728 */
5729 }
5730
5731 IWL_DEBUG_SCAN("Scanning %d [%s %d]\n",
5732 scan_ch->channel,
5733 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
5734 (scan_ch->type & 1) ?
5735 active_dwell : passive_dwell);
5736
5737 scan_ch++;
5738 added++;
5739 }
5740
5741 IWL_DEBUG_SCAN("total channels to scan %d \n", added);
5742 return added;
5743}
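
/* Illustrative sketch (not from the original source) of how the scan channel
 * 'type' field is composed above: bit 0 selects active vs. passive, the
 * direct-probe mask occupies the bits above it, and bit 7 flags a narrow
 * channel.  The helper name and parameters are hypothetical. */
static u8 scan_channel_type_sketch(int active, u8 direct_mask, int narrow)
{
	u8 type = active ? 1 : 0;		/* bit 0: 1 = active scan */

	if (type & 1)
		type |= (direct_mask << 1);	/* direct-probe SSID bits */
	if (narrow)
		type |= (1 << 7);		/* narrow (half-width) channel */
	return type;
}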
5744
bb8c093b 5745static void iwl4965_reset_channel_flag(struct iwl4965_priv *priv)
b481de9c
ZY
5746{
5747 int i, j;
5748 for (i = 0; i < 3; i++) {
5749 struct ieee80211_hw_mode *hw_mode = (void *)&priv->modes[i];
5750 for (j = 0; j < hw_mode->num_channels; j++)
5751 hw_mode->channels[j].flag = hw_mode->channels[j].val;
5752 }
5753}
5754
bb8c093b 5755static void iwl4965_init_hw_rates(struct iwl4965_priv *priv,
b481de9c
ZY
5756 struct ieee80211_rate *rates)
5757{
5758 int i;
5759
5760 for (i = 0; i < IWL_RATE_COUNT; i++) {
bb8c093b 5761 rates[i].rate = iwl4965_rates[i].ieee * 5;
b481de9c
ZY
5762 rates[i].val = i; /* Rate scaling will work on indexes */
5763 rates[i].val2 = i;
5764 rates[i].flags = IEEE80211_RATE_SUPPORTED;
5765 /* Only OFDM have the bits-per-symbol set */
5766 if ((i <= IWL_LAST_OFDM_RATE) && (i >= IWL_FIRST_OFDM_RATE))
5767 rates[i].flags |= IEEE80211_RATE_OFDM;
5768 else {
5769 /*
5770 * If CCK 1M then set rate flag to CCK else CCK_2
5771 * which is CCK | PREAMBLE2
5772 */
bb8c093b 5773 rates[i].flags |= (iwl4965_rates[i].plcp == 10) ?
b481de9c
ZY
5774 IEEE80211_RATE_CCK : IEEE80211_RATE_CCK_2;
5775 }
5776
5777 /* Set up which ones are basic rates... */
5778 if (IWL_BASIC_RATES_MASK & (1 << i))
5779 rates[i].flags |= IEEE80211_RATE_BASIC;
5780 }
b481de9c
ZY
5781}
5782
5783/**
bb8c093b 5784 * iwl4965_init_geos - Initialize mac80211's geo/channel info based from eeprom
b481de9c 5785 */
bb8c093b 5786static int iwl4965_init_geos(struct iwl4965_priv *priv)
b481de9c 5787{
bb8c093b 5788 struct iwl4965_channel_info *ch;
b481de9c
ZY
5789 struct ieee80211_hw_mode *modes;
5790 struct ieee80211_channel *channels;
5791 struct ieee80211_channel *geo_ch;
5792 struct ieee80211_rate *rates;
5793 int i = 0;
5794 enum {
5795 A = 0,
5796 B = 1,
5797 G = 2,
b481de9c 5798 };
326eeee8 5799 int mode_count = 3;
b481de9c
ZY
5800
5801 if (priv->modes) {
5802 IWL_DEBUG_INFO("Geography modes already initialized.\n");
5803 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5804 return 0;
5805 }
5806
5807 modes = kzalloc(sizeof(struct ieee80211_hw_mode) * mode_count,
5808 GFP_KERNEL);
5809 if (!modes)
5810 return -ENOMEM;
5811
5812 channels = kzalloc(sizeof(struct ieee80211_channel) *
5813 priv->channel_count, GFP_KERNEL);
5814 if (!channels) {
5815 kfree(modes);
5816 return -ENOMEM;
5817 }
5818
5819 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_MAX_RATES + 1)),
5820 GFP_KERNEL);
5821 if (!rates) {
5822 kfree(modes);
5823 kfree(channels);
5824 return -ENOMEM;
5825 }
5826
5827 /* 0 = 802.11a
5828 * 1 = 802.11b
5829 * 2 = 802.11g
5830 */
5831
5832 /* 5.2GHz channels start after the 2.4GHz channels */
5833 modes[A].mode = MODE_IEEE80211A;
bb8c093b 5834 modes[A].channels = &channels[ARRAY_SIZE(iwl4965_eeprom_band_1)];
b481de9c
ZY
5835 modes[A].rates = rates;
5836 modes[A].num_rates = 8; /* just OFDM */
5837 modes[A].rates = &rates[4];
5838 modes[A].num_channels = 0;
326eeee8
RR
5839#ifdef CONFIG_IWL4965_HT
5840 iwl4965_init_ht_hw_capab(&modes[A].ht_info, MODE_IEEE80211A);
5841#endif
b481de9c
ZY
5842
5843 modes[B].mode = MODE_IEEE80211B;
5844 modes[B].channels = channels;
5845 modes[B].rates = rates;
5846 modes[B].num_rates = 4; /* just CCK */
5847 modes[B].num_channels = 0;
5848
5849 modes[G].mode = MODE_IEEE80211G;
5850 modes[G].channels = channels;
5851 modes[G].rates = rates;
5852 modes[G].num_rates = 12; /* OFDM & CCK */
5853 modes[G].num_channels = 0;
326eeee8
RR
5854#ifdef CONFIG_IWL4965_HT
5855 iwl4965_init_ht_hw_capab(&modes[G].ht_info, MODE_IEEE80211G);
5856#endif
b481de9c
ZY
5857
5858 priv->ieee_channels = channels;
5859 priv->ieee_rates = rates;
5860
bb8c093b 5861 iwl4965_init_hw_rates(priv, rates);
b481de9c
ZY
5862
5863 for (i = 0, geo_ch = channels; i < priv->channel_count; i++) {
5864 ch = &priv->channel_info[i];
5865
5866 if (!is_channel_valid(ch)) {
5867 IWL_DEBUG_INFO("Channel %d [%sGHz] is restricted -- "
5868 "skipping.\n",
5869 ch->channel, is_channel_a_band(ch) ?
5870 "5.2" : "2.4");
5871 continue;
5872 }
5873
5874 if (is_channel_a_band(ch)) {
5875 geo_ch = &modes[A].channels[modes[A].num_channels++];
b481de9c
ZY
5876 } else {
5877 geo_ch = &modes[B].channels[modes[B].num_channels++];
5878 modes[G].num_channels++;
b481de9c
ZY
5879 }
5880
5881 geo_ch->freq = ieee80211chan2mhz(ch->channel);
5882 geo_ch->chan = ch->channel;
5883 geo_ch->power_level = ch->max_power_avg;
5884 geo_ch->antenna_max = 0xff;
5885
5886 if (is_channel_valid(ch)) {
5887 geo_ch->flag = IEEE80211_CHAN_W_SCAN;
5888 if (ch->flags & EEPROM_CHANNEL_IBSS)
5889 geo_ch->flag |= IEEE80211_CHAN_W_IBSS;
5890
5891 if (ch->flags & EEPROM_CHANNEL_ACTIVE)
5892 geo_ch->flag |= IEEE80211_CHAN_W_ACTIVE_SCAN;
5893
5894 if (ch->flags & EEPROM_CHANNEL_RADAR)
5895 geo_ch->flag |= IEEE80211_CHAN_W_RADAR_DETECT;
5896
5897 if (ch->max_power_avg > priv->max_channel_txpower_limit)
5898 priv->max_channel_txpower_limit =
5899 ch->max_power_avg;
5900 }
5901
5902 geo_ch->val = geo_ch->flag;
5903 }
5904
5905 if ((modes[A].num_channels == 0) && priv->is_abg) {
5906 printk(KERN_INFO DRV_NAME
5907 ": Incorrectly detected BG card as ABG. Please send "
5908 "your PCI ID 0x%04X:0x%04X to maintainer.\n",
5909 priv->pci_dev->device, priv->pci_dev->subsystem_device);
5910 priv->is_abg = 0;
5911 }
5912
5913 printk(KERN_INFO DRV_NAME
5914 ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
5915 modes[G].num_channels, modes[A].num_channels);
5916
5917 /*
5918	 * NOTE: We register these in order of preference -- the
5919 * stack doesn't currently (as of 7.0.6 / Apr 24 '07) pick
5920 * a phymode based on rates or AP capabilities but seems to
5921 * configure it purely on if the channel being configured
5922 * is supported by a mode -- and the first match is taken
5923 */
5924
5925 if (modes[G].num_channels)
5926 ieee80211_register_hwmode(priv->hw, &modes[G]);
5927 if (modes[B].num_channels)
5928 ieee80211_register_hwmode(priv->hw, &modes[B]);
5929 if (modes[A].num_channels)
5930 ieee80211_register_hwmode(priv->hw, &modes[A]);
5931
5932 priv->modes = modes;
5933 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5934
5935 return 0;
5936}
5937
849e0dce
RC
5938/*
5939 * iwl4965_free_geos - undo allocations in iwl4965_init_geos
5940 */
5941static void iwl4965_free_geos(struct iwl4965_priv *priv)
5942{
5943 kfree(priv->modes);
5944 kfree(priv->ieee_channels);
5945 kfree(priv->ieee_rates);
5946 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
5947}
5948
b481de9c
ZY
5949/******************************************************************************
5950 *
5951 * uCode download functions
5952 *
5953 ******************************************************************************/
5954
bb8c093b 5955static void iwl4965_dealloc_ucode_pci(struct iwl4965_priv *priv)
b481de9c 5956{
98c92211
TW
5957 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code);
5958 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data);
5959 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
5960 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init);
5961 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
5962 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
b481de9c
ZY
5963}
5964
5965/**
bb8c093b 5966 * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
b481de9c
ZY
5967 * looking at all data.
5968 */
4fd1f841 5969static int iwl4965_verify_inst_full(struct iwl4965_priv *priv, __le32 *image,
9fbab516 5970 u32 len)
b481de9c
ZY
5971{
5972 u32 val;
5973 u32 save_len = len;
5974 int rc = 0;
5975 u32 errcnt;
5976
5977 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
5978
bb8c093b 5979 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
5980 if (rc)
5981 return rc;
5982
bb8c093b 5983 iwl4965_write_direct32(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);
b481de9c
ZY
5984
5985 errcnt = 0;
5986 for (; len > 0; len -= sizeof(u32), image++) {
5987 /* read data comes through single port, auto-incr addr */
5988 /* NOTE: Use the debugless read so we don't flood kernel log
5989 * if IWL_DL_IO is set */
bb8c093b 5990 val = _iwl4965_read_direct32(priv, HBUS_TARG_MEM_RDAT);
b481de9c
ZY
5991 if (val != le32_to_cpu(*image)) {
5992 IWL_ERROR("uCode INST section is invalid at "
5993 "offset 0x%x, is 0x%x, s/b 0x%x\n",
5994 save_len - len, val, le32_to_cpu(*image));
5995 rc = -EIO;
5996 errcnt++;
5997 if (errcnt >= 20)
5998 break;
5999 }
6000 }
6001
bb8c093b 6002 iwl4965_release_nic_access(priv);
b481de9c
ZY
6003
6004 if (!errcnt)
6005 IWL_DEBUG_INFO
6006 ("ucode image in INSTRUCTION memory is good\n");
6007
6008 return rc;
6009}
6010
6011
6012/**
bb8c093b 6013 * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
b481de9c
ZY
6014 * using sample data 100 bytes apart. If these sample points are good,
6015 * it's a pretty good bet that everything between them is good, too.
6016 */
bb8c093b 6017static int iwl4965_verify_inst_sparse(struct iwl4965_priv *priv, __le32 *image, u32 len)
b481de9c
ZY
6018{
6019 u32 val;
6020 int rc = 0;
6021 u32 errcnt = 0;
6022 u32 i;
6023
6024 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
6025
bb8c093b 6026 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
6027 if (rc)
6028 return rc;
6029
6030 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
6031 /* read data comes through single port, auto-incr addr */
6032 /* NOTE: Use the debugless read so we don't flood kernel log
6033 * if IWL_DL_IO is set */
bb8c093b 6034 iwl4965_write_direct32(priv, HBUS_TARG_MEM_RADDR,
b481de9c 6035 i + RTC_INST_LOWER_BOUND);
bb8c093b 6036 val = _iwl4965_read_direct32(priv, HBUS_TARG_MEM_RDAT);
b481de9c
ZY
6037 if (val != le32_to_cpu(*image)) {
6038#if 0 /* Enable this if you want to see details */
6039 IWL_ERROR("uCode INST section is invalid at "
6040 "offset 0x%x, is 0x%x, s/b 0x%x\n",
6041 i, val, *image);
6042#endif
6043 rc = -EIO;
6044 errcnt++;
6045 if (errcnt >= 3)
6046 break;
6047 }
6048 }
6049
bb8c093b 6050 iwl4965_release_nic_access(priv);
b481de9c
ZY
6051
6052 return rc;
6053}
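
/* Illustrative sketch (not from the original source) contrasting the two
 * verification strategies above: the sparse check samples one u32 every 100
 * bytes and gives up after 3 mismatches, while the full check walks every
 * word and tolerates up to 20.  For an image of len bytes the sparse pass
 * therefore reads roughly len/100 words instead of len/4.  The helper below
 * only counts the sample points the sparse loop would visit. */
static u32 sparse_sample_count_sketch(u32 image_len_bytes)
{
	u32 samples = 0;
	u32 i;

	for (i = 0; i < image_len_bytes; i += 100)
		samples++;		/* one u32 read per 100-byte stride */

	return samples;			/* roughly image_len_bytes / 100 */
}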
6054
6055
6056/**
bb8c093b 6057 * iwl4965_verify_ucode - determine which instruction image is in SRAM,
b481de9c
ZY
6058 * and verify its contents
6059 */
bb8c093b 6060static int iwl4965_verify_ucode(struct iwl4965_priv *priv)
b481de9c
ZY
6061{
6062 __le32 *image;
6063 u32 len;
6064 int rc = 0;
6065
6066 /* Try bootstrap */
6067 image = (__le32 *)priv->ucode_boot.v_addr;
6068 len = priv->ucode_boot.len;
bb8c093b 6069 rc = iwl4965_verify_inst_sparse(priv, image, len);
b481de9c
ZY
6070 if (rc == 0) {
6071 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
6072 return 0;
6073 }
6074
6075 /* Try initialize */
6076 image = (__le32 *)priv->ucode_init.v_addr;
6077 len = priv->ucode_init.len;
bb8c093b 6078 rc = iwl4965_verify_inst_sparse(priv, image, len);
b481de9c
ZY
6079 if (rc == 0) {
6080 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
6081 return 0;
6082 }
6083
6084 /* Try runtime/protocol */
6085 image = (__le32 *)priv->ucode_code.v_addr;
6086 len = priv->ucode_code.len;
bb8c093b 6087 rc = iwl4965_verify_inst_sparse(priv, image, len);
b481de9c
ZY
6088 if (rc == 0) {
6089 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
6090 return 0;
6091 }
6092
6093 IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
6094
9fbab516
BC
6095 /* Since nothing seems to match, show first several data entries in
6096 * instruction SRAM, so maybe visual inspection will give a clue.
6097 * Selection of bootstrap image (vs. other images) is arbitrary. */
b481de9c
ZY
6098 image = (__le32 *)priv->ucode_boot.v_addr;
6099 len = priv->ucode_boot.len;
bb8c093b 6100 rc = iwl4965_verify_inst_full(priv, image, len);
b481de9c
ZY
6101
6102 return rc;
6103}
6104
6105
6106/* check contents of special bootstrap uCode SRAM */
bb8c093b 6107static int iwl4965_verify_bsm(struct iwl4965_priv *priv)
b481de9c
ZY
6108{
6109 __le32 *image = priv->ucode_boot.v_addr;
6110 u32 len = priv->ucode_boot.len;
6111 u32 reg;
6112 u32 val;
6113
6114 IWL_DEBUG_INFO("Begin verify bsm\n");
6115
6116 /* verify BSM SRAM contents */
bb8c093b 6117 val = iwl4965_read_prph(priv, BSM_WR_DWCOUNT_REG);
b481de9c
ZY
6118 for (reg = BSM_SRAM_LOWER_BOUND;
6119 reg < BSM_SRAM_LOWER_BOUND + len;
6120 reg += sizeof(u32), image ++) {
bb8c093b 6121 val = iwl4965_read_prph(priv, reg);
b481de9c
ZY
6122 if (val != le32_to_cpu(*image)) {
6123 IWL_ERROR("BSM uCode verification failed at "
6124 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
6125 BSM_SRAM_LOWER_BOUND,
6126 reg - BSM_SRAM_LOWER_BOUND, len,
6127 val, le32_to_cpu(*image));
6128 return -EIO;
6129 }
6130 }
6131
6132 IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");
6133
6134 return 0;
6135}
6136
6137/**
bb8c093b 6138 * iwl4965_load_bsm - Load bootstrap instructions
b481de9c
ZY
6139 *
6140 * BSM operation:
6141 *
6142 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
6143 * in special SRAM that does not power down during RFKILL. When powering back
6144 * up after power-saving sleeps (or during initial uCode load), the BSM loads
6145 * the bootstrap program into the on-board processor, and starts it.
6146 *
6147 * The bootstrap program loads (via DMA) instructions and data for a new
6148 * program from host DRAM locations indicated by the host driver in the
6149 * BSM_DRAM_* registers. Once the new program is loaded, it starts
6150 * automatically.
6151 *
6152 * When initializing the NIC, the host driver points the BSM to the
6153 * "initialize" uCode image. This uCode sets up some internal data, then
6154 * notifies host via "initialize alive" that it is complete.
6155 *
6156 * The host then replaces the BSM_DRAM_* pointer values to point to the
6157 * normal runtime uCode instructions and a backup uCode data cache buffer
6158 * (filled initially with starting data values for the on-board processor),
6159 * then triggers the "initialize" uCode to load and launch the runtime uCode,
6160 * which begins normal operation.
6161 *
6162 * When doing a power-save shutdown, runtime uCode saves data SRAM into
6163 * the backup data cache in DRAM before SRAM is powered down.
6164 *
6165 * When powering back up, the BSM loads the bootstrap program. This reloads
6166 * the runtime uCode instructions and the backup data cache into SRAM,
6167 * and re-launches the runtime uCode from where it left off.
6168 */
bb8c093b 6169static int iwl4965_load_bsm(struct iwl4965_priv *priv)
b481de9c
ZY
6170{
6171 __le32 *image = priv->ucode_boot.v_addr;
6172 u32 len = priv->ucode_boot.len;
6173 dma_addr_t pinst;
6174 dma_addr_t pdata;
6175 u32 inst_len;
6176 u32 data_len;
6177 int rc;
6178 int i;
6179 u32 done;
6180 u32 reg_offset;
6181
6182 IWL_DEBUG_INFO("Begin load bsm\n");
6183
6184 /* make sure bootstrap program is no larger than BSM's SRAM size */
6185 if (len > IWL_MAX_BSM_SIZE)
6186 return -EINVAL;
6187
6188 /* Tell bootstrap uCode where to find the "Initialize" uCode
9fbab516 6189 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
bb8c093b 6190 * NOTE: iwl4965_initialize_alive_start() will replace these values,
b481de9c
ZY
6191 * after the "initialize" uCode has run, to point to
6192 * runtime/protocol instructions and backup data cache. */
6193 pinst = priv->ucode_init.p_addr >> 4;
6194 pdata = priv->ucode_init_data.p_addr >> 4;
6195 inst_len = priv->ucode_init.len;
6196 data_len = priv->ucode_init_data.len;
6197
bb8c093b 6198 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
6199 if (rc)
6200 return rc;
6201
bb8c093b
CH
6202 iwl4965_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
6203 iwl4965_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
6204 iwl4965_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
6205 iwl4965_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
b481de9c
ZY
6206
6207 /* Fill BSM memory with bootstrap instructions */
6208 for (reg_offset = BSM_SRAM_LOWER_BOUND;
6209 reg_offset < BSM_SRAM_LOWER_BOUND + len;
6210 reg_offset += sizeof(u32), image++)
bb8c093b 6211 _iwl4965_write_prph(priv, reg_offset,
b481de9c
ZY
6212 le32_to_cpu(*image));
6213
bb8c093b 6214 rc = iwl4965_verify_bsm(priv);
b481de9c 6215 if (rc) {
bb8c093b 6216 iwl4965_release_nic_access(priv);
b481de9c
ZY
6217 return rc;
6218 }
6219
6220 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
bb8c093b
CH
6221 iwl4965_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
6222 iwl4965_write_prph(priv, BSM_WR_MEM_DST_REG,
b481de9c 6223 RTC_INST_LOWER_BOUND);
bb8c093b 6224 iwl4965_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
b481de9c
ZY
6225
6226 /* Load bootstrap code into instruction SRAM now,
6227 * to prepare to load "initialize" uCode */
bb8c093b 6228 iwl4965_write_prph(priv, BSM_WR_CTRL_REG,
b481de9c
ZY
6229 BSM_WR_CTRL_REG_BIT_START);
6230
6231 /* Wait for load of bootstrap uCode to finish */
6232 for (i = 0; i < 100; i++) {
bb8c093b 6233 done = iwl4965_read_prph(priv, BSM_WR_CTRL_REG);
b481de9c
ZY
6234 if (!(done & BSM_WR_CTRL_REG_BIT_START))
6235 break;
6236 udelay(10);
6237 }
6238 if (i < 100)
6239 IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
6240 else {
6241 IWL_ERROR("BSM write did not complete!\n");
6242 return -EIO;
6243 }
6244
6245 /* Enable future boot loads whenever power management unit triggers it
6246 * (e.g. when powering back up after power-save shutdown) */
bb8c093b 6247 iwl4965_write_prph(priv, BSM_WR_CTRL_REG,
b481de9c
ZY
6248 BSM_WR_CTRL_REG_BIT_START_EN);
6249
bb8c093b 6250 iwl4965_release_nic_access(priv);
b481de9c
ZY
6251
6252 return 0;
6253}
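
/* Minimal sketch (not from the original source) of the bounded-poll pattern
 * used at the end of iwl4965_load_bsm(): kick the BSM, then poll its control
 * register up to 100 times with a 10 usec delay, i.e. wait at most ~1 msec
 * for the START bit to clear.  Like the caller above, this assumes NIC access
 * is already held; the helper name is hypothetical. */
static int bsm_wait_done_sketch(struct iwl4965_priv *priv)
{
	int i;

	for (i = 0; i < 100; i++) {
		if (!(iwl4965_read_prph(priv, BSM_WR_CTRL_REG) &
		      BSM_WR_CTRL_REG_BIT_START))
			return 0;	/* bootstrap copy finished */
		udelay(10);
	}

	return -EIO;			/* ~1 msec elapsed, still busy */
}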
6254
bb8c093b 6255static void iwl4965_nic_start(struct iwl4965_priv *priv)
b481de9c
ZY
6256{
6257 /* Remove all resets to allow NIC to operate */
bb8c093b 6258 iwl4965_write32(priv, CSR_RESET, 0);
b481de9c
ZY
6259}
6260
90e759d1 6261
b481de9c 6262/**
bb8c093b 6263 * iwl4965_read_ucode - Read uCode images from disk file.
b481de9c
ZY
6264 *
6265 * Copy into buffers for card to fetch via bus-mastering
6266 */
bb8c093b 6267static int iwl4965_read_ucode(struct iwl4965_priv *priv)
b481de9c 6268{
bb8c093b 6269 struct iwl4965_ucode *ucode;
90e759d1 6270 int ret;
b481de9c
ZY
6271 const struct firmware *ucode_raw;
6272 const char *name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode";
6273 u8 *src;
6274 size_t len;
6275 u32 ver, inst_size, data_size, init_size, init_data_size, boot_size;
6276
6277 /* Ask kernel firmware_class module to get the boot firmware off disk.
6278 * request_firmware() is synchronous, file is in memory on return. */
90e759d1
TW
6279 ret = request_firmware(&ucode_raw, name, &priv->pci_dev->dev);
6280 if (ret < 0) {
6281 IWL_ERROR("%s firmware file req failed: Reason %d\n",
6282 name, ret);
b481de9c
ZY
6283 goto error;
6284 }
6285
6286 IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n",
6287 name, ucode_raw->size);
6288
6289 /* Make sure that we got at least our header! */
6290 if (ucode_raw->size < sizeof(*ucode)) {
6291 IWL_ERROR("File size way too small!\n");
90e759d1 6292 ret = -EINVAL;
b481de9c
ZY
6293 goto err_release;
6294 }
6295
6296 /* Data from ucode file: header followed by uCode images */
6297 ucode = (void *)ucode_raw->data;
6298
6299 ver = le32_to_cpu(ucode->ver);
6300 inst_size = le32_to_cpu(ucode->inst_size);
6301 data_size = le32_to_cpu(ucode->data_size);
6302 init_size = le32_to_cpu(ucode->init_size);
6303 init_data_size = le32_to_cpu(ucode->init_data_size);
6304 boot_size = le32_to_cpu(ucode->boot_size);
6305
6306 IWL_DEBUG_INFO("f/w package hdr ucode version = 0x%x\n", ver);
6307 IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n",
6308 inst_size);
6309 IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n",
6310 data_size);
6311 IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n",
6312 init_size);
6313 IWL_DEBUG_INFO("f/w package hdr init data size = %u\n",
6314 init_data_size);
6315 IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n",
6316 boot_size);
6317
6318 /* Verify size of file vs. image size info in file's header */
6319 if (ucode_raw->size < sizeof(*ucode) +
6320 inst_size + data_size + init_size +
6321 init_data_size + boot_size) {
6322
6323 IWL_DEBUG_INFO("uCode file size %d too small\n",
6324 (int)ucode_raw->size);
90e759d1 6325 ret = -EINVAL;
b481de9c
ZY
6326 goto err_release;
6327 }
6328
6329 /* Verify that uCode images will fit in card's SRAM */
6330 if (inst_size > IWL_MAX_INST_SIZE) {
90e759d1
TW
6331 IWL_DEBUG_INFO("uCode instr len %d too large to fit in\n",
6332 inst_size);
6333 ret = -EINVAL;
b481de9c
ZY
6334 goto err_release;
6335 }
6336
6337 if (data_size > IWL_MAX_DATA_SIZE) {
90e759d1
TW
6338 IWL_DEBUG_INFO("uCode data len %d too large to fit in\n",
6339 data_size);
6340 ret = -EINVAL;
b481de9c
ZY
6341 goto err_release;
6342 }
6343 if (init_size > IWL_MAX_INST_SIZE) {
6344 IWL_DEBUG_INFO
90e759d1
TW
6345 ("uCode init instr len %d too large to fit in\n",
6346 init_size);
6347 ret = -EINVAL;
b481de9c
ZY
6348 goto err_release;
6349 }
6350 if (init_data_size > IWL_MAX_DATA_SIZE) {
6351 IWL_DEBUG_INFO
90e759d1
TW
6352 ("uCode init data len %d too large to fit in\n",
6353 init_data_size);
6354 ret = -EINVAL;
b481de9c
ZY
6355 goto err_release;
6356 }
6357 if (boot_size > IWL_MAX_BSM_SIZE) {
6358 IWL_DEBUG_INFO
90e759d1
TW
6359 ("uCode boot instr len %d too large to fit in\n",
6360 boot_size);
6361 ret = -EINVAL;
b481de9c
ZY
6362 goto err_release;
6363 }
6364
6365 /* Allocate ucode buffers for card's bus-master loading ... */
6366
6367 /* Runtime instructions and 2 copies of data:
6368 * 1) unmodified from disk
6369 * 2) backup cache for save/restore during power-downs */
6370 priv->ucode_code.len = inst_size;
98c92211 6371 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
b481de9c
ZY
6372
6373 priv->ucode_data.len = data_size;
98c92211 6374 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
b481de9c
ZY
6375
6376 priv->ucode_data_backup.len = data_size;
98c92211 6377 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
b481de9c
ZY
6378
6379 /* Initialization instructions and data */
90e759d1
TW
6380 if (init_size && init_data_size) {
6381 priv->ucode_init.len = init_size;
98c92211 6382 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
90e759d1
TW
6383
6384 priv->ucode_init_data.len = init_data_size;
98c92211 6385 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
90e759d1
TW
6386
6387 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
6388 goto err_pci_alloc;
6389 }
b481de9c
ZY
6390
6391 /* Bootstrap (instructions only, no data) */
90e759d1
TW
6392 if (boot_size) {
6393 priv->ucode_boot.len = boot_size;
98c92211 6394 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
b481de9c 6395
90e759d1
TW
6396 if (!priv->ucode_boot.v_addr)
6397 goto err_pci_alloc;
6398 }
b481de9c
ZY
6399
6400 /* Copy images into buffers for card's bus-master reads ... */
6401
6402 /* Runtime instructions (first block of data in file) */
6403 src = &ucode->data[0];
6404 len = priv->ucode_code.len;
90e759d1 6405 IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %Zd\n", len);
b481de9c
ZY
6406 memcpy(priv->ucode_code.v_addr, src, len);
6407 IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
6408 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
6409
6410 /* Runtime data (2nd block)
bb8c093b 6411 * NOTE: Copy into backup buffer will be done in iwl4965_up() */
b481de9c
ZY
6412 src = &ucode->data[inst_size];
6413 len = priv->ucode_data.len;
90e759d1 6414 IWL_DEBUG_INFO("Copying (but not loading) uCode data len %Zd\n", len);
b481de9c
ZY
6415 memcpy(priv->ucode_data.v_addr, src, len);
6416 memcpy(priv->ucode_data_backup.v_addr, src, len);
6417
6418 /* Initialization instructions (3rd block) */
6419 if (init_size) {
6420 src = &ucode->data[inst_size + data_size];
6421 len = priv->ucode_init.len;
90e759d1
TW
6422 IWL_DEBUG_INFO("Copying (but not loading) init instr len %Zd\n",
6423 len);
b481de9c
ZY
6424 memcpy(priv->ucode_init.v_addr, src, len);
6425 }
6426
6427 /* Initialization data (4th block) */
6428 if (init_data_size) {
6429 src = &ucode->data[inst_size + data_size + init_size];
6430 len = priv->ucode_init_data.len;
90e759d1
TW
6431 IWL_DEBUG_INFO("Copying (but not loading) init data len %Zd\n",
6432 len);
b481de9c
ZY
6433 memcpy(priv->ucode_init_data.v_addr, src, len);
6434 }
6435
6436 /* Bootstrap instructions (5th block) */
6437 src = &ucode->data[inst_size + data_size + init_size + init_data_size];
6438 len = priv->ucode_boot.len;
90e759d1 6439 IWL_DEBUG_INFO("Copying (but not loading) boot instr len %Zd\n", len);
b481de9c
ZY
6440 memcpy(priv->ucode_boot.v_addr, src, len);
6441
6442 /* We have our copies now, allow OS release its copies */
6443 release_firmware(ucode_raw);
6444 return 0;
6445
6446 err_pci_alloc:
6447 IWL_ERROR("failed to allocate pci memory\n");
90e759d1 6448 ret = -ENOMEM;
bb8c093b 6449 iwl4965_dealloc_ucode_pci(priv);
b481de9c
ZY
6450
6451 err_release:
6452 release_firmware(ucode_raw);
6453
6454 error:
90e759d1 6455 return ret;
b481de9c
ZY
6456}
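
/* Illustrative sketch (not from the original source) of the on-disk layout
 * parsed by iwl4965_read_ucode() above: a header of six little-endian u32
 * fields (version plus five image sizes), followed by the five images back to
 * back in the order the copies above use.  The offsets into ucode->data[]
 * mirror the expressions in the copy loop. */
static void ucode_image_offsets_sketch(u32 inst_size, u32 data_size,
				       u32 init_size, u32 init_data_size,
				       u32 *off_runtime_inst,
				       u32 *off_runtime_data,
				       u32 *off_init_inst,
				       u32 *off_init_data,
				       u32 *off_boot_inst)
{
	*off_runtime_inst = 0;
	*off_runtime_data = inst_size;
	*off_init_inst    = inst_size + data_size;
	*off_init_data    = inst_size + data_size + init_size;
	*off_boot_inst    = inst_size + data_size + init_size + init_data_size;
}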
6457
6458
6459/**
bb8c093b 6460 * iwl4965_set_ucode_ptrs - Set uCode address location
b481de9c
ZY
6461 *
6462 * Tell initialization uCode where to find runtime uCode.
6463 *
6464 * BSM registers initially contain pointers to initialization uCode.
6465 * We need to replace them to load runtime uCode inst and data,
6466 * and to save runtime data when powering down.
6467 */
bb8c093b 6468static int iwl4965_set_ucode_ptrs(struct iwl4965_priv *priv)
b481de9c
ZY
6469{
6470 dma_addr_t pinst;
6471 dma_addr_t pdata;
6472 int rc = 0;
6473 unsigned long flags;
6474
6475 /* bits 35:4 for 4965 */
6476 pinst = priv->ucode_code.p_addr >> 4;
6477 pdata = priv->ucode_data_backup.p_addr >> 4;
6478
6479 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 6480 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
6481 if (rc) {
6482 spin_unlock_irqrestore(&priv->lock, flags);
6483 return rc;
6484 }
6485
6486 /* Tell bootstrap uCode where to find image to load */
bb8c093b
CH
6487 iwl4965_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
6488 iwl4965_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
6489 iwl4965_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
b481de9c
ZY
6490 priv->ucode_data.len);
6491
6492 /* Inst bytecount must be last to set up, bit 31 signals uCode
6493 * that all new ptr/size info is in place */
bb8c093b 6494 iwl4965_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
b481de9c
ZY
6495 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
6496
bb8c093b 6497 iwl4965_release_nic_access(priv);
b481de9c
ZY
6498
6499 spin_unlock_irqrestore(&priv->lock, flags);
6500
6501 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
6502
6503 return rc;
6504}
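
The ">> 4" in iwl4965_set_ucode_ptrs() packs a DMA address into a 32-bit BSM pointer register; per the "bits 35:4" comment it keeps address bits 35:4, which only loses nothing if the buffers are at least 16-byte aligned so the dropped low bits are zero. A standalone sketch of that packing follows; bsm_pack_addr() and the example address are made up for illustration, and the alignment requirement is my reading of the shift, not something stated in the code.

#include <stdint.h>
#include <stdio.h>

/* Pack an (up to 36-bit) DMA address into a 32-bit register value by
 * dropping the low 4 bits, assuming 16-byte alignment. */
static uint32_t bsm_pack_addr(uint64_t dma_addr)
{
    return (uint32_t)(dma_addr >> 4);
}

int main(void)
{
    uint64_t pinst = 0x1eefc0000ULL;    /* invented example address */
    uint32_t reg = bsm_pack_addr(pinst);

    printf("reg 0x%08x unpacks to 0x%09llx\n",
           (unsigned)reg, (unsigned long long)reg << 4);
    return 0;
}
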
6505
6506/**
bb8c093b 6507 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
b481de9c
ZY
6508 *
6509 * Called after REPLY_ALIVE notification received from "initialize" uCode.
6510 *
6511 * The 4965 "initialize" ALIVE reply contains calibration data for:
6512 * Voltage, temperature, and MIMO tx gain correction, now stored in priv
6513 * (3945 does not contain this data).
6514 *
6515 * Tell "initialize" uCode to go ahead and load the runtime uCode.
6516*/
bb8c093b 6517static void iwl4965_init_alive_start(struct iwl4965_priv *priv)
b481de9c
ZY
6518{
6519 /* Check alive response for "valid" sign from uCode */
6520 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
6521 /* We had an error bringing up the hardware, so take it
6522 * all the way back down so we can try again */
6523 IWL_DEBUG_INFO("Initialize Alive failed.\n");
6524 goto restart;
6525 }
6526
6527 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
6528 * This is a paranoid check, because we would not have gotten the
6529 * "initialize" alive if code weren't properly loaded. */
bb8c093b 6530 if (iwl4965_verify_ucode(priv)) {
b481de9c
ZY
6531 /* Runtime instruction load was bad;
6532 * take it all the way back down so we can try again */
6533 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
6534 goto restart;
6535 }
6536
6537 /* Calculate temperature */
6538 priv->temperature = iwl4965_get_temperature(priv);
6539
6540 /* Send pointers to protocol/runtime uCode image ... init code will
6541 * load and launch runtime uCode, which will send us another "Alive"
6542 * notification. */
6543 IWL_DEBUG_INFO("Initialization Alive received.\n");
bb8c093b 6544 if (iwl4965_set_ucode_ptrs(priv)) {
b481de9c
ZY
6545 /* Runtime instruction load won't happen;
6546 * take it all the way back down so we can try again */
6547 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
6548 goto restart;
6549 }
6550 return;
6551
6552 restart:
6553 queue_work(priv->workqueue, &priv->restart);
6554}
6555
6556
6557/**
bb8c093b 6558 * iwl4965_alive_start - called after REPLY_ALIVE notification received
b481de9c 6559 * from protocol/runtime uCode (initialization uCode's
bb8c093b 6560 * Alive gets handled by iwl4965_init_alive_start()).
b481de9c 6561 */
bb8c093b 6562static void iwl4965_alive_start(struct iwl4965_priv *priv)
b481de9c
ZY
6563{
6564 int rc = 0;
6565
6566 IWL_DEBUG_INFO("Runtime Alive received.\n");
6567
6568 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
6569 /* We had an error bringing up the hardware, so take it
6570 * all the way back down so we can try again */
6571 IWL_DEBUG_INFO("Alive failed.\n");
6572 goto restart;
6573 }
6574
6575 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
6576 * This is a paranoid check, because we would not have gotten the
6577 * "runtime" alive if code weren't properly loaded. */
bb8c093b 6578 if (iwl4965_verify_ucode(priv)) {
b481de9c
ZY
6579 /* Runtime instruction load was bad;
6580 * take it all the way back down so we can try again */
6581 IWL_DEBUG_INFO("Bad runtime uCode load.\n");
6582 goto restart;
6583 }
6584
bb8c093b 6585 iwl4965_clear_stations_table(priv);
b481de9c
ZY
6586
6587 rc = iwl4965_alive_notify(priv);
6588 if (rc) {
6589 IWL_WARNING("Could not complete ALIVE transition [ntf]: %d\n",
6590 rc);
6591 goto restart;
6592 }
6593
9fbab516 6594 /* After the ALIVE response, we can send host commands to 4965 uCode */
b481de9c
ZY
6595 set_bit(STATUS_ALIVE, &priv->status);
6596
6597 /* Clear out the uCode error bit if it is set */
6598 clear_bit(STATUS_FW_ERROR, &priv->status);
6599
bb8c093b 6600 if (iwl4965_is_rfkill(priv))
b481de9c
ZY
6601 return;
6602
5a66926a 6603 ieee80211_start_queues(priv->hw);
b481de9c
ZY
6604
6605 priv->active_rate = priv->rates_mask;
6606 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
6607
bb8c093b 6608 iwl4965_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode));
b481de9c 6609
bb8c093b
CH
6610 if (iwl4965_is_associated(priv)) {
6611 struct iwl4965_rxon_cmd *active_rxon =
6612 (struct iwl4965_rxon_cmd *)(&priv->active_rxon);
b481de9c
ZY
6613
6614 memcpy(&priv->staging_rxon, &priv->active_rxon,
6615 sizeof(priv->staging_rxon));
6616 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
6617 } else {
6618 /* Initialize our rx_config data */
bb8c093b 6619 iwl4965_connection_init_rx_config(priv);
b481de9c
ZY
6620 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
6621 }
6622
9fbab516 6623 /* Configure Bluetooth device coexistence support */
bb8c093b 6624 iwl4965_send_bt_config(priv);
b481de9c
ZY
6625
6626 /* Configure the adapter for unassociated operation */
bb8c093b 6627 iwl4965_commit_rxon(priv);
b481de9c
ZY
6628
6629 /* At this point, the NIC is initialized and operational */
6630 priv->notif_missed_beacons = 0;
6631 set_bit(STATUS_READY, &priv->status);
6632
6633 iwl4965_rf_kill_ct_config(priv);
5a66926a 6634
b481de9c 6635 IWL_DEBUG_INFO("ALIVE processing complete.\n");
5a66926a 6636 wake_up_interruptible(&priv->wait_command_queue);
b481de9c
ZY
6637
6638 if (priv->error_recovering)
bb8c093b 6639 iwl4965_error_recovery(priv);
b481de9c
ZY
6640
6641 return;
6642
6643 restart:
6644 queue_work(priv->workqueue, &priv->restart);
6645}
6646
bb8c093b 6647static void iwl4965_cancel_deferred_work(struct iwl4965_priv *priv);
b481de9c 6648
bb8c093b 6649static void __iwl4965_down(struct iwl4965_priv *priv)
b481de9c
ZY
6650{
6651 unsigned long flags;
6652 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
6653 struct ieee80211_conf *conf = NULL;
6654
6655 IWL_DEBUG_INFO(DRV_NAME " is going down\n");
6656
6657 conf = ieee80211_get_hw_conf(priv->hw);
6658
6659 if (!exit_pending)
6660 set_bit(STATUS_EXIT_PENDING, &priv->status);
6661
bb8c093b 6662 iwl4965_clear_stations_table(priv);
b481de9c
ZY
6663
6664 /* Unblock any waiting calls */
6665 wake_up_interruptible_all(&priv->wait_command_queue);
6666
b481de9c
ZY
6667 /* Wipe out the EXIT_PENDING status bit if we are not actually
6668 * exiting the module */
6669 if (!exit_pending)
6670 clear_bit(STATUS_EXIT_PENDING, &priv->status);
6671
6672 /* stop and reset the on-board processor */
bb8c093b 6673 iwl4965_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
b481de9c
ZY
6674
6675 /* tell the device to stop sending interrupts */
bb8c093b 6676 iwl4965_disable_interrupts(priv);
b481de9c
ZY
6677
6678 if (priv->mac80211_registered)
6679 ieee80211_stop_queues(priv->hw);
6680
bb8c093b 6681 /* If we have not previously called iwl4965_init() then
b481de9c 6682 * clear all bits but the RF Kill and SUSPEND bits and return */
bb8c093b 6683 if (!iwl4965_is_init(priv)) {
b481de9c
ZY
6684 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
6685 STATUS_RF_KILL_HW |
6686 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
6687 STATUS_RF_KILL_SW |
9788864e
RC
6688 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
6689 STATUS_GEO_CONFIGURED |
b481de9c
ZY
6690 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
6691 STATUS_IN_SUSPEND;
6692 goto exit;
6693 }
6694
6695 /* ...otherwise clear out all the status bits but the RF Kill and
6696 * SUSPEND bits and continue taking the NIC down. */
6697 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
6698 STATUS_RF_KILL_HW |
6699 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
6700 STATUS_RF_KILL_SW |
9788864e
RC
6701 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
6702 STATUS_GEO_CONFIGURED |
b481de9c
ZY
6703 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
6704 STATUS_IN_SUSPEND |
6705 test_bit(STATUS_FW_ERROR, &priv->status) <<
6706 STATUS_FW_ERROR;
6707
6708 spin_lock_irqsave(&priv->lock, flags);
9fbab516
BC
6709 iwl4965_clear_bit(priv, CSR_GP_CNTRL,
6710 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
b481de9c
ZY
6711 spin_unlock_irqrestore(&priv->lock, flags);
6712
bb8c093b
CH
6713 iwl4965_hw_txq_ctx_stop(priv);
6714 iwl4965_hw_rxq_stop(priv);
b481de9c
ZY
6715
6716 spin_lock_irqsave(&priv->lock, flags);
bb8c093b
CH
6717 if (!iwl4965_grab_nic_access(priv)) {
6718 iwl4965_write_prph(priv, APMG_CLK_DIS_REG,
b481de9c 6719 APMG_CLK_VAL_DMA_CLK_RQT);
bb8c093b 6720 iwl4965_release_nic_access(priv);
b481de9c
ZY
6721 }
6722 spin_unlock_irqrestore(&priv->lock, flags);
6723
6724 udelay(5);
6725
bb8c093b
CH
6726 iwl4965_hw_nic_stop_master(priv);
6727 iwl4965_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
6728 iwl4965_hw_nic_reset(priv);
b481de9c
ZY
6729
6730 exit:
bb8c093b 6731 memset(&priv->card_alive, 0, sizeof(struct iwl4965_alive_resp));
b481de9c
ZY
6732
6733 if (priv->ibss_beacon)
6734 dev_kfree_skb(priv->ibss_beacon);
6735 priv->ibss_beacon = NULL;
6736
6737 /* clear out any free frames */
bb8c093b 6738 iwl4965_clear_free_frames(priv);
b481de9c
ZY
6739}
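
The status rebuild in __iwl4965_down() spells out "keep only these bits" as a chain of test_bit() results shifted back into place; since test_bit() yields 0 or 1, the whole expression is equivalent to ANDing the status word with a mask of the preserved bits. A standalone sketch of that equivalence, with invented bit numbers and a local test_bit_ul() stand-in for the kernel's test_bit():

#include <stdio.h>

/* Invented stand-ins for the preserved STATUS_* bit numbers. */
enum { ST_RF_KILL_HW = 0, ST_RF_KILL_SW = 1, ST_GEO_CONFIGURED = 3,
       ST_IN_SUSPEND = 4 };

static unsigned long test_bit_ul(int nr, unsigned long word)
{
    return (word >> nr) & 1UL;
}

int main(void)
{
    unsigned long status = 0xf7UL;      /* invented: many bits set */

    /* Driver-style spelling: rebuild from the individual kept bits... */
    unsigned long rebuilt =
        test_bit_ul(ST_RF_KILL_HW, status) << ST_RF_KILL_HW |
        test_bit_ul(ST_RF_KILL_SW, status) << ST_RF_KILL_SW |
        test_bit_ul(ST_GEO_CONFIGURED, status) << ST_GEO_CONFIGURED |
        test_bit_ul(ST_IN_SUSPEND, status) << ST_IN_SUSPEND;

    /* ...which is the same as masking with the kept bits. */
    unsigned long mask = 1UL << ST_RF_KILL_HW | 1UL << ST_RF_KILL_SW |
                         1UL << ST_GEO_CONFIGURED | 1UL << ST_IN_SUSPEND;

    printf("rebuilt 0x%lx masked 0x%lx\n", rebuilt, status & mask);
    return 0;
}
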
6740
bb8c093b 6741static void iwl4965_down(struct iwl4965_priv *priv)
b481de9c
ZY
6742{
6743 mutex_lock(&priv->mutex);
bb8c093b 6744 __iwl4965_down(priv);
b481de9c 6745 mutex_unlock(&priv->mutex);
b24d22b1 6746
bb8c093b 6747 iwl4965_cancel_deferred_work(priv);
b481de9c
ZY
6748}
6749
6750#define MAX_HW_RESTARTS 5
6751
bb8c093b 6752static int __iwl4965_up(struct iwl4965_priv *priv)
b481de9c
ZY
6753{
6754 int rc, i;
b481de9c
ZY
6755
6756 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
6757 IWL_WARNING("Exit pending; will not bring the NIC up\n");
6758 return -EIO;
6759 }
6760
6761 if (test_bit(STATUS_RF_KILL_SW, &priv->status)) {
6762 IWL_WARNING("Radio disabled by SW RF kill (module "
6763 "parameter)\n");
e655b9f0
ZY
6764 return -ENODEV;
6765 }
6766
e903fbd4
RC
6767 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
6768 IWL_ERROR("ucode not available for device bringup\n");
6769 return -EIO;
6770 }
6771
e655b9f0
ZY
6772 /* If platform's RF_KILL switch is NOT set to KILL */
6773 if (iwl4965_read32(priv, CSR_GP_CNTRL) &
6774 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
6775 clear_bit(STATUS_RF_KILL_HW, &priv->status);
6776 else {
6777 set_bit(STATUS_RF_KILL_HW, &priv->status);
6778 if (!test_bit(STATUS_IN_SUSPEND, &priv->status)) {
6779 IWL_WARNING("Radio disabled by HW RF Kill switch\n");
6780 return -ENODEV;
6781 }
b481de9c
ZY
6782 }
6783
bb8c093b 6784 iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
b481de9c 6785
bb8c093b 6786 rc = iwl4965_hw_nic_init(priv);
b481de9c
ZY
6787 if (rc) {
6788 IWL_ERROR("Unable to init nic\n");
6789 return rc;
6790 }
6791
6792 /* make sure rfkill handshake bits are cleared */
bb8c093b
CH
6793 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6794 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR,
b481de9c
ZY
6795 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
6796
6797 /* clear (again), then enable host interrupts */
bb8c093b
CH
6798 iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
6799 iwl4965_enable_interrupts(priv);
b481de9c
ZY
6800
6801 /* really make sure rfkill handshake bits are cleared */
bb8c093b
CH
6802 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6803 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
b481de9c
ZY
6804
6805 /* Copy original ucode data image from disk into backup cache.
6806 * This will be used to initialize the on-board processor's
6807 * data SRAM for a clean start when the runtime program first loads. */
6808 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
5a66926a 6809 priv->ucode_data.len);
b481de9c 6810
e655b9f0
ZY
6811 /* We return success when we resume from suspend and rf_kill is on. */
6812 if (test_bit(STATUS_RF_KILL_HW, &priv->status))
b481de9c 6813 return 0;
b481de9c
ZY
6814
6815 for (i = 0; i < MAX_HW_RESTARTS; i++) {
6816
bb8c093b 6817 iwl4965_clear_stations_table(priv);
b481de9c
ZY
6818
6819 /* load bootstrap state machine,
6820 * load bootstrap program into processor's memory,
6821 * prepare to load the "initialize" uCode */
bb8c093b 6822 rc = iwl4965_load_bsm(priv);
b481de9c
ZY
6823
6824 if (rc) {
6825 IWL_ERROR("Unable to set up bootstrap uCode: %d\n", rc);
6826 continue;
6827 }
6828
6829 /* start card; "initialize" will load runtime ucode */
bb8c093b 6830 iwl4965_nic_start(priv);
b481de9c 6831
b481de9c
ZY
6832 IWL_DEBUG_INFO(DRV_NAME " is coming up\n");
6833
6834 return 0;
6835 }
6836
6837 set_bit(STATUS_EXIT_PENDING, &priv->status);
bb8c093b 6838 __iwl4965_down(priv);
b481de9c
ZY
6839
6840 /* tried to restart and config the device for as long as our
6841 * patience could withstand */
6842 IWL_ERROR("Unable to initialize device after %d attempts.\n", i);
6843 return -EIO;
6844}
6845
6846
6847/*****************************************************************************
6848 *
6849 * Workqueue callbacks
6850 *
6851 *****************************************************************************/
6852
bb8c093b 6853static void iwl4965_bg_init_alive_start(struct work_struct *data)
b481de9c 6854{
bb8c093b
CH
6855 struct iwl4965_priv *priv =
6856 container_of(data, struct iwl4965_priv, init_alive_start.work);
b481de9c
ZY
6857
6858 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6859 return;
6860
6861 mutex_lock(&priv->mutex);
bb8c093b 6862 iwl4965_init_alive_start(priv);
b481de9c
ZY
6863 mutex_unlock(&priv->mutex);
6864}
6865
bb8c093b 6866static void iwl4965_bg_alive_start(struct work_struct *data)
b481de9c 6867{
bb8c093b
CH
6868 struct iwl4965_priv *priv =
6869 container_of(data, struct iwl4965_priv, alive_start.work);
b481de9c
ZY
6870
6871 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6872 return;
6873
6874 mutex_lock(&priv->mutex);
bb8c093b 6875 iwl4965_alive_start(priv);
b481de9c
ZY
6876 mutex_unlock(&priv->mutex);
6877}
6878
bb8c093b 6879static void iwl4965_bg_rf_kill(struct work_struct *work)
b481de9c 6880{
bb8c093b 6881 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv, rf_kill);
b481de9c
ZY
6882
6883 wake_up_interruptible(&priv->wait_command_queue);
6884
6885 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6886 return;
6887
6888 mutex_lock(&priv->mutex);
6889
bb8c093b 6890 if (!iwl4965_is_rfkill(priv)) {
b481de9c
ZY
6891 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL,
6892 "HW and/or SW RF Kill no longer active, restarting "
6893 "device\n");
6894 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
6895 queue_work(priv->workqueue, &priv->restart);
6896 } else {
6897
6898 if (!test_bit(STATUS_RF_KILL_HW, &priv->status))
6899 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
6900 "disabled by SW switch\n");
6901 else
6902 IWL_WARNING("Radio Frequency Kill Switch is On:\n"
6903 "Kill switch must be turned off for "
6904 "wireless networking to work.\n");
6905 }
6906 mutex_unlock(&priv->mutex);
6907}
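
The branch structure in iwl4965_bg_rf_kill() reduces to a small decision table: only when neither the hardware switch nor the software kill is active does the worker queue a restart; otherwise it reports whichever switch is still blocking the radio, with the hardware switch taking precedence in the message. A standalone sketch of that table, for illustration only:

#include <stdio.h>

static const char *rf_kill_action(int hw_kill, int sw_kill)
{
    if (!hw_kill && !sw_kill)
        return "restart device";
    if (!hw_kill)
        return "blocked: disabled by SW switch";
    return "blocked: HW kill switch is on";
}

int main(void)
{
    int hw, sw;

    for (hw = 0; hw <= 1; hw++)
        for (sw = 0; sw <= 1; sw++)
            printf("hw_kill=%d sw_kill=%d -> %s\n",
                   hw, sw, rf_kill_action(hw, sw));
    return 0;
}
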
6908
6909#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
6910
bb8c093b 6911static void iwl4965_bg_scan_check(struct work_struct *data)
b481de9c 6912{
bb8c093b
CH
6913 struct iwl4965_priv *priv =
6914 container_of(data, struct iwl4965_priv, scan_check.work);
b481de9c
ZY
6915
6916 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6917 return;
6918
6919 mutex_lock(&priv->mutex);
6920 if (test_bit(STATUS_SCANNING, &priv->status) ||
6921 test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
6922 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN,
6923 "Scan completion watchdog resetting adapter (%dms)\n",
6924 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
052c4b9f 6925
b481de9c 6926 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
bb8c093b 6927 iwl4965_send_scan_abort(priv);
b481de9c
ZY
6928 }
6929 mutex_unlock(&priv->mutex);
6930}
6931
bb8c093b 6932static void iwl4965_bg_request_scan(struct work_struct *data)
b481de9c 6933{
bb8c093b
CH
6934 struct iwl4965_priv *priv =
6935 container_of(data, struct iwl4965_priv, request_scan);
6936 struct iwl4965_host_cmd cmd = {
b481de9c 6937 .id = REPLY_SCAN_CMD,
bb8c093b 6938 .len = sizeof(struct iwl4965_scan_cmd),
b481de9c
ZY
6939 .meta.flags = CMD_SIZE_HUGE,
6940 };
6941 int rc = 0;
bb8c093b 6942 struct iwl4965_scan_cmd *scan;
b481de9c
ZY
6943 struct ieee80211_conf *conf = NULL;
6944 u8 direct_mask;
6945 int phymode;
6946
6947 conf = ieee80211_get_hw_conf(priv->hw);
6948
6949 mutex_lock(&priv->mutex);
6950
bb8c093b 6951 if (!iwl4965_is_ready(priv)) {
b481de9c
ZY
6952 IWL_WARNING("request scan called when driver not ready.\n");
6953 goto done;
6954 }
6955
6956 /* Make sure the scan wasn't cancelled before this queued work
6957 * was given the chance to run... */
6958 if (!test_bit(STATUS_SCANNING, &priv->status))
6959 goto done;
6960
6961 /* This should never be called or scheduled if there is currently
6962 * a scan active in the hardware. */
6963 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
6964 IWL_DEBUG_INFO("Scan request while another scan is in progress. "
6965 "Ignoring second request.\n");
6966 rc = -EIO;
6967 goto done;
6968 }
6969
6970 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
6971 IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n");
6972 goto done;
6973 }
6974
6975 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
6976 IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6977 goto done;
6978 }
6979
bb8c093b 6980 if (iwl4965_is_rfkill(priv)) {
b481de9c
ZY
6981 IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6982 goto done;
6983 }
6984
6985 if (!test_bit(STATUS_READY, &priv->status)) {
6986 IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n");
6987 goto done;
6988 }
6989
6990 if (!priv->scan_bands) {
6991 IWL_DEBUG_HC("Aborting scan due to no requested bands\n");
6992 goto done;
6993 }
6994
6995 if (!priv->scan) {
bb8c093b 6996 priv->scan = kmalloc(sizeof(struct iwl4965_scan_cmd) +
b481de9c
ZY
6997 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
6998 if (!priv->scan) {
6999 rc = -ENOMEM;
7000 goto done;
7001 }
7002 }
7003 scan = priv->scan;
bb8c093b 7004 memset(scan, 0, sizeof(struct iwl4965_scan_cmd) + IWL_MAX_SCAN_SIZE);
b481de9c
ZY
7005
7006 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
7007 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
7008
bb8c093b 7009 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
7010 u16 interval = 0;
7011 u32 extra;
7012 u32 suspend_time = 100;
7013 u32 scan_suspend_time = 100;
7014 unsigned long flags;
7015
7016 IWL_DEBUG_INFO("Scanning while associated...\n");
7017
7018 spin_lock_irqsave(&priv->lock, flags);
7019 interval = priv->beacon_int;
7020 spin_unlock_irqrestore(&priv->lock, flags);
7021
7022 scan->suspend_time = 0;
052c4b9f 7023 scan->max_out_time = cpu_to_le32(200 * 1024);
b481de9c
ZY
7024 if (!interval)
7025 interval = suspend_time;
7026
7027 extra = (suspend_time / interval) << 22;
7028 scan_suspend_time = (extra |
7029 ((suspend_time % interval) * 1024));
7030 scan->suspend_time = cpu_to_le32(scan_suspend_time);
7031 IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n",
7032 scan_suspend_time, interval);
7033 }
7034
7035 /* We should add the ability for user to lock to PASSIVE ONLY */
7036 if (priv->one_direct_scan) {
7037 IWL_DEBUG_SCAN
7038 ("Kicking off one direct scan for '%s'\n",
bb8c093b 7039 iwl4965_escape_essid(priv->direct_ssid,
b481de9c
ZY
7040 priv->direct_ssid_len));
7041 scan->direct_scan[0].id = WLAN_EID_SSID;
7042 scan->direct_scan[0].len = priv->direct_ssid_len;
7043 memcpy(scan->direct_scan[0].ssid,
7044 priv->direct_ssid, priv->direct_ssid_len);
7045 direct_mask = 1;
bb8c093b 7046 } else if (!iwl4965_is_associated(priv) && priv->essid_len) {
b481de9c
ZY
7047 scan->direct_scan[0].id = WLAN_EID_SSID;
7048 scan->direct_scan[0].len = priv->essid_len;
7049 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len);
7050 direct_mask = 1;
7051 } else
7052 direct_mask = 0;
7053
7054 /* We don't build a direct scan probe request; the uCode will do
7055 * that based on the direct_mask added to each channel entry */
7056 scan->tx_cmd.len = cpu_to_le16(
bb8c093b 7057 iwl4965_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
18904f58 7058 IWL_MAX_SCAN_SIZE - sizeof(*scan), 0));
b481de9c
ZY
7059 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
7060 scan->tx_cmd.sta_id = priv->hw_setting.bcast_sta_id;
7061 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
7062
7063 /* flags + rate selection */
7064
7065 scan->tx_cmd.tx_flags |= cpu_to_le32(0x200);
7066
7067 switch (priv->scan_bands) {
7068 case 2:
7069 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
7070 scan->tx_cmd.rate_n_flags =
bb8c093b 7071 iwl4965_hw_set_rate_n_flags(IWL_RATE_1M_PLCP,
b481de9c
ZY
7072 RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
7073
7074 scan->good_CRC_th = 0;
7075 phymode = MODE_IEEE80211G;
7076 break;
7077
7078 case 1:
7079 scan->tx_cmd.rate_n_flags =
bb8c093b 7080 iwl4965_hw_set_rate_n_flags(IWL_RATE_6M_PLCP,
b481de9c
ZY
7081 RATE_MCS_ANT_B_MSK);
7082 scan->good_CRC_th = IWL_GOOD_CRC_TH;
7083 phymode = MODE_IEEE80211A;
7084 break;
7085
7086 default:
7087 IWL_WARNING("Invalid scan band count\n");
7088 goto done;
7089 }
7090
7091 /* select Rx chains */
7092
7093 /* Force use of chains B and C (0x6) for scan Rx.
7094 * Avoid A (0x1) because of its off-channel reception on A-band.
7095 * MIMO is not used here, but value is required to make uCode happy. */
7096 scan->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
7097 cpu_to_le16((0x7 << RXON_RX_CHAIN_VALID_POS) |
7098 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
7099 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
7100
7101 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR)
7102 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
7103
7104 if (direct_mask)
7105 IWL_DEBUG_SCAN
7106 ("Initiating direct scan for %s.\n",
bb8c093b 7107 iwl4965_escape_essid(priv->essid, priv->essid_len));
b481de9c
ZY
7108 else
7109 IWL_DEBUG_SCAN("Initiating indirect scan.\n");
7110
7111 scan->channel_count =
bb8c093b 7112 iwl4965_get_channels_for_scan(
b481de9c
ZY
7113 priv, phymode, 1, /* active */
7114 direct_mask,
7115 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
7116
7117 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
bb8c093b 7118 scan->channel_count * sizeof(struct iwl4965_scan_channel);
b481de9c
ZY
7119 cmd.data = scan;
7120 scan->len = cpu_to_le16(cmd.len);
7121
7122 set_bit(STATUS_SCAN_HW, &priv->status);
bb8c093b 7123 rc = iwl4965_send_cmd_sync(priv, &cmd);
b481de9c
ZY
7124 if (rc)
7125 goto done;
7126
7127 queue_delayed_work(priv->workqueue, &priv->scan_check,
7128 IWL_SCAN_CHECK_WATCHDOG);
7129
7130 mutex_unlock(&priv->mutex);
7131 return;
7132
7133 done:
01ebd063 7134 /* inform mac80211 scan aborted */
b481de9c
ZY
7135 queue_work(priv->workqueue, &priv->scan_completed);
7136 mutex_unlock(&priv->mutex);
7137}
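
The suspend_time math above, used when scanning while associated, packs two quantities into one 32-bit word: the quotient suspend_time / beacon-interval goes into bits 31:22 ("extra"), and the remainder, scaled by 1024 (which reads like a TU-to-microsecond conversion, though the code does not say so), fills the low bits. A standalone sketch with invented numbers:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the packing above: quotient in bits 31:22, scaled remainder
 * in the low bits. */
static uint32_t pack_suspend_time(uint32_t suspend_time, uint32_t beacon_int)
{
    uint32_t extra = (suspend_time / beacon_int) << 22;

    return extra | ((suspend_time % beacon_int) * 1024);
}

int main(void)
{
    /* suspend_time 100, beacon interval 100: quotient 1, remainder 0 */
    printf("0x%08x\n", (unsigned)pack_suspend_time(100, 100));
    /* beacon interval 30: quotient 3, remainder 10 -> 10240 in low bits */
    printf("0x%08x\n", (unsigned)pack_suspend_time(100, 30));
    return 0;
}
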
7138
bb8c093b 7139static void iwl4965_bg_up(struct work_struct *data)
b481de9c 7140{
bb8c093b 7141 struct iwl4965_priv *priv = container_of(data, struct iwl4965_priv, up);
b481de9c
ZY
7142
7143 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7144 return;
7145
7146 mutex_lock(&priv->mutex);
bb8c093b 7147 __iwl4965_up(priv);
b481de9c
ZY
7148 mutex_unlock(&priv->mutex);
7149}
7150
bb8c093b 7151static void iwl4965_bg_restart(struct work_struct *data)
b481de9c 7152{
bb8c093b 7153 struct iwl4965_priv *priv = container_of(data, struct iwl4965_priv, restart);
b481de9c
ZY
7154
7155 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7156 return;
7157
bb8c093b 7158 iwl4965_down(priv);
b481de9c
ZY
7159 queue_work(priv->workqueue, &priv->up);
7160}
7161
bb8c093b 7162static void iwl4965_bg_rx_replenish(struct work_struct *data)
b481de9c 7163{
bb8c093b
CH
7164 struct iwl4965_priv *priv =
7165 container_of(data, struct iwl4965_priv, rx_replenish);
b481de9c
ZY
7166
7167 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7168 return;
7169
7170 mutex_lock(&priv->mutex);
bb8c093b 7171 iwl4965_rx_replenish(priv);
b481de9c
ZY
7172 mutex_unlock(&priv->mutex);
7173}
7174
7878a5a4
MA
7175#define IWL_DELAY_NEXT_SCAN (HZ*2)
7176
bb8c093b 7177static void iwl4965_bg_post_associate(struct work_struct *data)
b481de9c 7178{
bb8c093b 7179 struct iwl4965_priv *priv = container_of(data, struct iwl4965_priv,
b481de9c
ZY
7180 post_associate.work);
7181
7182 int rc = 0;
7183 struct ieee80211_conf *conf = NULL;
0795af57 7184 DECLARE_MAC_BUF(mac);
b481de9c
ZY
7185
7186 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
7187 IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__);
7188 return;
7189 }
7190
0795af57
JP
7191 IWL_DEBUG_ASSOC("Associated as %d to: %s\n",
7192 priv->assoc_id,
7193 print_mac(mac, priv->active_rxon.bssid_addr));
b481de9c
ZY
7194
7195
7196 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7197 return;
7198
7199 mutex_lock(&priv->mutex);
7200
32bfd35d 7201 if (!priv->vif || !priv->is_open) {
948c171c
MA
7202 mutex_unlock(&priv->mutex);
7203 return;
7204 }
bb8c093b 7205 iwl4965_scan_cancel_timeout(priv, 200);
052c4b9f 7206
b481de9c
ZY
7207 conf = ieee80211_get_hw_conf(priv->hw);
7208
7209 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7210 iwl4965_commit_rxon(priv);
b481de9c 7211
bb8c093b
CH
7212 memset(&priv->rxon_timing, 0, sizeof(struct iwl4965_rxon_time_cmd));
7213 iwl4965_setup_rxon_timing(priv);
7214 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON_TIMING,
b481de9c
ZY
7215 sizeof(priv->rxon_timing), &priv->rxon_timing);
7216 if (rc)
7217 IWL_WARNING("REPLY_RXON_TIMING failed - "
7218 "Attempting to continue.\n");
7219
7220 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
7221
c8b0e6e1 7222#ifdef CONFIG_IWL4965_HT
fd105e79
RR
7223 if (priv->current_ht_config.is_ht)
7224 iwl4965_set_rxon_ht(priv, &priv->current_ht_config);
c8b0e6e1 7225#endif /* CONFIG_IWL4965_HT*/
b481de9c
ZY
7226 iwl4965_set_rxon_chain(priv);
7227 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
7228
7229 IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n",
7230 priv->assoc_id, priv->beacon_int);
7231
7232 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7233 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
7234 else
7235 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
7236
7237 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
7238 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
7239 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
7240 else
7241 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
7242
7243 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7244 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
7245
7246 }
7247
bb8c093b 7248 iwl4965_commit_rxon(priv);
b481de9c
ZY
7249
7250 switch (priv->iw_mode) {
7251 case IEEE80211_IF_TYPE_STA:
bb8c093b 7252 iwl4965_rate_scale_init(priv->hw, IWL_AP_ID);
b481de9c
ZY
7253 break;
7254
7255 case IEEE80211_IF_TYPE_IBSS:
7256
7257 /* clear out the station table */
bb8c093b 7258 iwl4965_clear_stations_table(priv);
b481de9c 7259
bb8c093b
CH
7260 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0);
7261 iwl4965_rxon_add_station(priv, priv->bssid, 0);
7262 iwl4965_rate_scale_init(priv->hw, IWL_STA_ID);
7263 iwl4965_send_beacon_cmd(priv);
b481de9c
ZY
7264
7265 break;
7266
7267 default:
7268 IWL_ERROR("%s Should not be called in %d mode\n",
7269 __FUNCTION__, priv->iw_mode);
7270 break;
7271 }
7272
bb8c093b 7273 iwl4965_sequence_reset(priv);
b481de9c 7274
c8b0e6e1 7275#ifdef CONFIG_IWL4965_SENSITIVITY
b481de9c
ZY
7276 /* Enable Rx differential gain and sensitivity calibrations */
7277 iwl4965_chain_noise_reset(priv);
7278 priv->start_calib = 1;
c8b0e6e1 7279#endif /* CONFIG_IWL4965_SENSITIVITY */
b481de9c
ZY
7280
7281 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7282 priv->assoc_station_added = 1;
7283
c8b0e6e1 7284#ifdef CONFIG_IWL4965_QOS
bb8c093b 7285 iwl4965_activate_qos(priv, 0);
c8b0e6e1 7286#endif /* CONFIG_IWL4965_QOS */
7878a5a4
MA
7287 /* we have just associated, don't start scan too early */
7288 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
b481de9c
ZY
7289 mutex_unlock(&priv->mutex);
7290}
7291
bb8c093b 7292static void iwl4965_bg_abort_scan(struct work_struct *work)
b481de9c 7293{
bb8c093b 7294 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv, abort_scan);
b481de9c 7295
bb8c093b 7296 if (!iwl4965_is_ready(priv))
b481de9c
ZY
7297 return;
7298
7299 mutex_lock(&priv->mutex);
7300
7301 set_bit(STATUS_SCAN_ABORTING, &priv->status);
bb8c093b 7302 iwl4965_send_scan_abort(priv);
b481de9c
ZY
7303
7304 mutex_unlock(&priv->mutex);
7305}
7306
76bb77e0
ZY
7307static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf);
7308
bb8c093b 7309static void iwl4965_bg_scan_completed(struct work_struct *work)
b481de9c 7310{
bb8c093b
CH
7311 struct iwl4965_priv *priv =
7312 container_of(work, struct iwl4965_priv, scan_completed);
b481de9c
ZY
7313
7314 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete scan\n");
7315
7316 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7317 return;
7318
a0646470
ZY
7319 if (test_bit(STATUS_CONF_PENDING, &priv->status))
7320 iwl4965_mac_config(priv->hw, ieee80211_get_hw_conf(priv->hw));
76bb77e0 7321
b481de9c
ZY
7322 ieee80211_scan_completed(priv->hw);
7323
7324 /* Since setting the TXPOWER may have been deferred while
7325 * performing the scan, fire one off */
7326 mutex_lock(&priv->mutex);
bb8c093b 7327 iwl4965_hw_reg_send_txpower(priv);
b481de9c
ZY
7328 mutex_unlock(&priv->mutex);
7329}
7330
7331/*****************************************************************************
7332 *
7333 * mac80211 entry point functions
7334 *
7335 *****************************************************************************/
7336
5a66926a
ZY
7337#define UCODE_READY_TIMEOUT (2 * HZ)
7338
bb8c093b 7339static int iwl4965_mac_start(struct ieee80211_hw *hw)
b481de9c 7340{
bb8c093b 7341 struct iwl4965_priv *priv = hw->priv;
5a66926a 7342 int ret;
b481de9c
ZY
7343
7344 IWL_DEBUG_MAC80211("enter\n");
7345
5a66926a
ZY
7346 if (pci_enable_device(priv->pci_dev)) {
7347 IWL_ERROR("Fail to pci_enable_device\n");
7348 return -ENODEV;
7349 }
7350 pci_restore_state(priv->pci_dev);
7351 pci_enable_msi(priv->pci_dev);
7352
7353 ret = request_irq(priv->pci_dev->irq, iwl4965_isr, IRQF_SHARED,
7354 DRV_NAME, priv);
7355 if (ret) {
7356 IWL_ERROR("Error allocating IRQ %d\n", priv->pci_dev->irq);
7357 goto out_disable_msi;
7358 }
7359
b481de9c
ZY
7360 /* we should be verifying the device is ready to be opened */
7361 mutex_lock(&priv->mutex);
7362
5a66926a
ZY
7363 memset(&priv->staging_rxon, 0, sizeof(struct iwl4965_rxon_cmd));
7364 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
7365 * ucode filename and max sizes are card-specific. */
b481de9c 7366
5a66926a
ZY
7367 if (!priv->ucode_code.len) {
7368 ret = iwl4965_read_ucode(priv);
7369 if (ret) {
7370 IWL_ERROR("Could not read microcode: %d\n", ret);
7371 mutex_unlock(&priv->mutex);
7372 goto out_release_irq;
7373 }
7374 }
b481de9c 7375
e655b9f0 7376 ret = __iwl4965_up(priv);
5a66926a 7377
b481de9c 7378 mutex_unlock(&priv->mutex);
5a66926a 7379
e655b9f0
ZY
7380 if (ret)
7381 goto out_release_irq;
7382
7383 IWL_DEBUG_INFO("Start UP work done.\n");
7384
7385 if (test_bit(STATUS_IN_SUSPEND, &priv->status))
7386 return 0;
7387
5a66926a
ZY
7388 /* Wait for START_ALIVE from ucode. Otherwise callbacks from
7389 * mac80211 will not be run successfully. */
7390 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
7391 test_bit(STATUS_READY, &priv->status),
7392 UCODE_READY_TIMEOUT);
7393 if (!ret) {
7394 if (!test_bit(STATUS_READY, &priv->status)) {
7395 IWL_ERROR("Wait for START_ALIVE timeout after %dms.\n",
7396 jiffies_to_msecs(UCODE_READY_TIMEOUT));
7397 ret = -ETIMEDOUT;
7398 goto out_release_irq;
7399 }
7400 }
7401
e655b9f0 7402 priv->is_open = 1;
b481de9c
ZY
7403 IWL_DEBUG_MAC80211("leave\n");
7404 return 0;
5a66926a
ZY
7405
7406out_release_irq:
7407 free_irq(priv->pci_dev->irq, priv);
7408out_disable_msi:
7409 pci_disable_msi(priv->pci_dev);
e655b9f0
ZY
7410 pci_disable_device(priv->pci_dev);
7411 priv->is_open = 0;
7412 IWL_DEBUG_MAC80211("leave - failed\n");
5a66926a 7413 return ret;
b481de9c
ZY
7414}
7415
bb8c093b 7416static void iwl4965_mac_stop(struct ieee80211_hw *hw)
b481de9c 7417{
bb8c093b 7418 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7419
7420 IWL_DEBUG_MAC80211("enter\n");
948c171c 7421
e655b9f0
ZY
7422 if (!priv->is_open) {
7423 IWL_DEBUG_MAC80211("leave - skip\n");
7424 return;
7425 }
7426
b481de9c 7427 priv->is_open = 0;
5a66926a
ZY
7428
7429 if (iwl4965_is_ready_rf(priv)) {
e655b9f0
ZY
7430 /* stop mac, cancel any scan request and clear
7431 * RXON_FILTER_ASSOC_MSK BIT
7432 */
5a66926a
ZY
7433 mutex_lock(&priv->mutex);
7434 iwl4965_scan_cancel_timeout(priv, 100);
7435 cancel_delayed_work(&priv->post_associate);
fde3571f 7436 mutex_unlock(&priv->mutex);
fde3571f
MA
7437 }
7438
5a66926a
ZY
7439 iwl4965_down(priv);
7440
7441 flush_workqueue(priv->workqueue);
7442 free_irq(priv->pci_dev->irq, priv);
7443 pci_disable_msi(priv->pci_dev);
7444 pci_save_state(priv->pci_dev);
7445 pci_disable_device(priv->pci_dev);
948c171c 7446
b481de9c 7447 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
7448}
7449
bb8c093b 7450static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
b481de9c
ZY
7451 struct ieee80211_tx_control *ctl)
7452{
bb8c093b 7453 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7454
7455 IWL_DEBUG_MAC80211("enter\n");
7456
7457 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
7458 IWL_DEBUG_MAC80211("leave - monitor\n");
7459 return -1;
7460 }
7461
7462 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
7463 ctl->tx_rate);
7464
bb8c093b 7465 if (iwl4965_tx_skb(priv, skb, ctl))
b481de9c
ZY
7466 dev_kfree_skb_any(skb);
7467
7468 IWL_DEBUG_MAC80211("leave\n");
7469 return 0;
7470}
7471
bb8c093b 7472static int iwl4965_mac_add_interface(struct ieee80211_hw *hw,
b481de9c
ZY
7473 struct ieee80211_if_init_conf *conf)
7474{
bb8c093b 7475 struct iwl4965_priv *priv = hw->priv;
b481de9c 7476 unsigned long flags;
0795af57 7477 DECLARE_MAC_BUF(mac);
b481de9c 7478
32bfd35d 7479 IWL_DEBUG_MAC80211("enter: type %d\n", conf->type);
b481de9c 7480
32bfd35d
JB
7481 if (priv->vif) {
7482 IWL_DEBUG_MAC80211("leave - vif != NULL\n");
75849d28 7483 return -EOPNOTSUPP;
b481de9c
ZY
7484 }
7485
7486 spin_lock_irqsave(&priv->lock, flags);
32bfd35d 7487 priv->vif = conf->vif;
b481de9c
ZY
7488
7489 spin_unlock_irqrestore(&priv->lock, flags);
7490
7491 mutex_lock(&priv->mutex);
864792e3
TW
7492
7493 if (conf->mac_addr) {
7494 IWL_DEBUG_MAC80211("Set %s\n", print_mac(mac, conf->mac_addr));
7495 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
7496 }
b481de9c 7497
5a66926a
ZY
7498 if (iwl4965_is_ready(priv))
7499 iwl4965_set_mode(priv, conf->type);
7500
b481de9c
ZY
7501 mutex_unlock(&priv->mutex);
7502
5a66926a 7503 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
7504 return 0;
7505}
7506
7507/**
bb8c093b 7508 * iwl4965_mac_config - mac80211 config callback
b481de9c
ZY
7509 *
7510 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
7511 * be set inappropriately and the driver currently sets the hardware up to
7512 * use it whenever needed.
7513 */
bb8c093b 7514static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
b481de9c 7515{
bb8c093b
CH
7516 struct iwl4965_priv *priv = hw->priv;
7517 const struct iwl4965_channel_info *ch_info;
b481de9c 7518 unsigned long flags;
76bb77e0 7519 int ret = 0;
b481de9c
ZY
7520
7521 mutex_lock(&priv->mutex);
7522 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel);
7523
12342c47
ZY
7524 priv->add_radiotap = !!(conf->flags & IEEE80211_CONF_RADIOTAP);
7525
bb8c093b 7526 if (!iwl4965_is_ready(priv)) {
b481de9c 7527 IWL_DEBUG_MAC80211("leave - not ready\n");
76bb77e0
ZY
7528 ret = -EIO;
7529 goto out;
b481de9c
ZY
7530 }
7531
bb8c093b 7532 if (unlikely(!iwl4965_param_disable_hw_scan &&
b481de9c 7533 test_bit(STATUS_SCANNING, &priv->status))) {
a0646470
ZY
7534 IWL_DEBUG_MAC80211("leave - scanning\n");
7535 set_bit(STATUS_CONF_PENDING, &priv->status);
b481de9c 7536 mutex_unlock(&priv->mutex);
a0646470 7537 return 0;
b481de9c
ZY
7538 }
7539
7540 spin_lock_irqsave(&priv->lock, flags);
7541
bb8c093b 7542 ch_info = iwl4965_get_channel_info(priv, conf->phymode, conf->channel);
b481de9c
ZY
7543 if (!is_channel_valid(ch_info)) {
7544 IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this SKU.\n",
7545 conf->channel, conf->phymode);
7546 IWL_DEBUG_MAC80211("leave - invalid channel\n");
7547 spin_unlock_irqrestore(&priv->lock, flags);
76bb77e0
ZY
7548 ret = -EINVAL;
7549 goto out;
b481de9c
ZY
7550 }
7551
c8b0e6e1 7552#ifdef CONFIG_IWL4965_HT
b481de9c
ZY
7553 /* if we are switching from ht to 2.4 clear flags
7554 * from any ht related info since 2.4 does not
7555 * support ht */
7556 if ((le16_to_cpu(priv->staging_rxon.channel) != conf->channel)
7557#ifdef IEEE80211_CONF_CHANNEL_SWITCH
7558 && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH)
7559#endif
7560 )
7561 priv->staging_rxon.flags = 0;
c8b0e6e1 7562#endif /* CONFIG_IWL4965_HT */
b481de9c 7563
bb8c093b 7564 iwl4965_set_rxon_channel(priv, conf->phymode, conf->channel);
b481de9c 7565
bb8c093b 7566 iwl4965_set_flags_for_phymode(priv, conf->phymode);
b481de9c
ZY
7567
7568 /* The list of supported rates and rate mask can be different
7569 * for each phymode; since the phymode may have changed, reset
7570 * the rate mask to what mac80211 lists */
bb8c093b 7571 iwl4965_set_rate(priv);
b481de9c
ZY
7572
7573 spin_unlock_irqrestore(&priv->lock, flags);
7574
7575#ifdef IEEE80211_CONF_CHANNEL_SWITCH
7576 if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) {
bb8c093b 7577 iwl4965_hw_channel_switch(priv, conf->channel);
76bb77e0 7578 goto out;
b481de9c
ZY
7579 }
7580#endif
7581
bb8c093b 7582 iwl4965_radio_kill_sw(priv, !conf->radio_enabled);
b481de9c
ZY
7583
7584 if (!conf->radio_enabled) {
7585 IWL_DEBUG_MAC80211("leave - radio disabled\n");
76bb77e0 7586 goto out;
b481de9c
ZY
7587 }
7588
bb8c093b 7589 if (iwl4965_is_rfkill(priv)) {
b481de9c 7590 IWL_DEBUG_MAC80211("leave - RF kill\n");
76bb77e0
ZY
7591 ret = -EIO;
7592 goto out;
b481de9c
ZY
7593 }
7594
bb8c093b 7595 iwl4965_set_rate(priv);
b481de9c
ZY
7596
7597 if (memcmp(&priv->active_rxon,
7598 &priv->staging_rxon, sizeof(priv->staging_rxon)))
bb8c093b 7599 iwl4965_commit_rxon(priv);
b481de9c
ZY
7600 else
7601 IWL_DEBUG_INFO("Not re-sending the same RXON configuration.\n");
7602
7603 IWL_DEBUG_MAC80211("leave\n");
7604
a0646470
ZY
7605out:
7606 clear_bit(STATUS_CONF_PENDING, &priv->status);
5a66926a 7607 mutex_unlock(&priv->mutex);
76bb77e0 7608 return ret;
b481de9c
ZY
7609}
7610
bb8c093b 7611static void iwl4965_config_ap(struct iwl4965_priv *priv)
b481de9c
ZY
7612{
7613 int rc = 0;
7614
d986bcd1 7615 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
b481de9c
ZY
7616 return;
7617
7618 /* The following should be done only at AP bring up */
7619 if ((priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) == 0) {
7620
7621 /* RXON - unassoc (to set timing command) */
7622 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7623 iwl4965_commit_rxon(priv);
b481de9c
ZY
7624
7625 /* RXON Timing */
bb8c093b
CH
7626 memset(&priv->rxon_timing, 0, sizeof(struct iwl4965_rxon_time_cmd));
7627 iwl4965_setup_rxon_timing(priv);
7628 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON_TIMING,
b481de9c
ZY
7629 sizeof(priv->rxon_timing), &priv->rxon_timing);
7630 if (rc)
7631 IWL_WARNING("REPLY_RXON_TIMING failed - "
7632 "Attempting to continue.\n");
7633
7634 iwl4965_set_rxon_chain(priv);
7635
7636 /* FIXME: what should be the assoc_id for AP? */
7637 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
7638 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7639 priv->staging_rxon.flags |=
7640 RXON_FLG_SHORT_PREAMBLE_MSK;
7641 else
7642 priv->staging_rxon.flags &=
7643 ~RXON_FLG_SHORT_PREAMBLE_MSK;
7644
7645 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
7646 if (priv->assoc_capability &
7647 WLAN_CAPABILITY_SHORT_SLOT_TIME)
7648 priv->staging_rxon.flags |=
7649 RXON_FLG_SHORT_SLOT_MSK;
7650 else
7651 priv->staging_rxon.flags &=
7652 ~RXON_FLG_SHORT_SLOT_MSK;
7653
7654 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7655 priv->staging_rxon.flags &=
7656 ~RXON_FLG_SHORT_SLOT_MSK;
7657 }
7658 /* restore RXON assoc */
7659 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
bb8c093b 7660 iwl4965_commit_rxon(priv);
c8b0e6e1 7661#ifdef CONFIG_IWL4965_QOS
bb8c093b 7662 iwl4965_activate_qos(priv, 1);
b481de9c 7663#endif
bb8c093b 7664 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0);
e1493deb 7665 }
bb8c093b 7666 iwl4965_send_beacon_cmd(priv);
b481de9c
ZY
7667
7668 /* FIXME - we need to add code here to detect a totally new
7669 * configuration, reset the AP, unassoc, rxon timing, assoc,
7670 * clear sta table, add BCAST sta... */
7671}
7672
32bfd35d
JB
7673static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
7674 struct ieee80211_vif *vif,
b481de9c
ZY
7675 struct ieee80211_if_conf *conf)
7676{
bb8c093b 7677 struct iwl4965_priv *priv = hw->priv;
0795af57 7678 DECLARE_MAC_BUF(mac);
b481de9c
ZY
7679 unsigned long flags;
7680 int rc;
7681
7682 if (conf == NULL)
7683 return -EIO;
7684
7685 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
7686 (!conf->beacon || !conf->ssid_len)) {
7687 IWL_DEBUG_MAC80211
7688 ("Leaving in AP mode because HostAPD is not ready.\n");
7689 return 0;
7690 }
7691
5a66926a
ZY
7692 if (!iwl4965_is_alive(priv))
7693 return -EAGAIN;
7694
b481de9c
ZY
7695 mutex_lock(&priv->mutex);
7696
b481de9c 7697 if (conf->bssid)
0795af57
JP
7698 IWL_DEBUG_MAC80211("bssid: %s\n",
7699 print_mac(mac, conf->bssid));
b481de9c 7700
4150c572
JB
7701/*
7702 * very dubious code was here; the probe filtering flag is never set:
7703 *
b481de9c
ZY
7704 if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) &&
7705 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
4150c572
JB
7706 */
7707 if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
b481de9c
ZY
7708 IWL_DEBUG_MAC80211("leave - scanning\n");
7709 mutex_unlock(&priv->mutex);
7710 return 0;
7711 }
7712
32bfd35d
JB
7713 if (priv->vif != vif) {
7714 IWL_DEBUG_MAC80211("leave - priv->vif != vif\n");
b481de9c
ZY
7715 mutex_unlock(&priv->mutex);
7716 return 0;
7717 }
7718
7719 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
7720 if (!conf->bssid) {
7721 conf->bssid = priv->mac_addr;
7722 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
0795af57
JP
7723 IWL_DEBUG_MAC80211("bssid was set to: %s\n",
7724 print_mac(mac, conf->bssid));
b481de9c
ZY
7725 }
7726 if (priv->ibss_beacon)
7727 dev_kfree_skb(priv->ibss_beacon);
7728
7729 priv->ibss_beacon = conf->beacon;
7730 }
7731
fde3571f
MA
7732 if (iwl4965_is_rfkill(priv))
7733 goto done;
7734
b481de9c
ZY
7735 if (conf->bssid && !is_zero_ether_addr(conf->bssid) &&
7736 !is_multicast_ether_addr(conf->bssid)) {
7737 /* If there is currently a HW scan going on in the background
7738 * then we need to cancel it else the RXON below will fail. */
bb8c093b 7739 if (iwl4965_scan_cancel_timeout(priv, 100)) {
b481de9c
ZY
7740 IWL_WARNING("Aborted scan still in progress "
7741 "after 100ms\n");
7742 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
7743 mutex_unlock(&priv->mutex);
7744 return -EAGAIN;
7745 }
7746 memcpy(priv->staging_rxon.bssid_addr, conf->bssid, ETH_ALEN);
7747
7748 /* TODO: Audit driver for usage of these members and see
7749 * if mac80211 deprecates them (priv->bssid looks like it
7750 * shouldn't be there, but I haven't scanned the IBSS code
7751 * to verify) - jpk */
7752 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
7753
7754 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
bb8c093b 7755 iwl4965_config_ap(priv);
b481de9c 7756 else {
bb8c093b 7757 rc = iwl4965_commit_rxon(priv);
b481de9c 7758 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc)
bb8c093b 7759 iwl4965_rxon_add_station(
b481de9c
ZY
7760 priv, priv->active_rxon.bssid_addr, 1);
7761 }
7762
7763 } else {
bb8c093b 7764 iwl4965_scan_cancel_timeout(priv, 100);
b481de9c 7765 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7766 iwl4965_commit_rxon(priv);
b481de9c
ZY
7767 }
7768
fde3571f 7769 done:
b481de9c
ZY
7770 spin_lock_irqsave(&priv->lock, flags);
7771 if (!conf->ssid_len)
7772 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
7773 else
7774 memcpy(priv->essid, conf->ssid, conf->ssid_len);
7775
7776 priv->essid_len = conf->ssid_len;
7777 spin_unlock_irqrestore(&priv->lock, flags);
7778
7779 IWL_DEBUG_MAC80211("leave\n");
7780 mutex_unlock(&priv->mutex);
7781
7782 return 0;
7783}
7784
bb8c093b 7785static void iwl4965_configure_filter(struct ieee80211_hw *hw,
4150c572
JB
7786 unsigned int changed_flags,
7787 unsigned int *total_flags,
7788 int mc_count, struct dev_addr_list *mc_list)
7789{
7790 /*
7791 * XXX: dummy
bb8c093b 7792 * see also iwl4965_connection_init_rx_config
4150c572
JB
7793 */
7794 *total_flags = 0;
7795}
7796
bb8c093b 7797static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw,
b481de9c
ZY
7798 struct ieee80211_if_init_conf *conf)
7799{
bb8c093b 7800 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7801
7802 IWL_DEBUG_MAC80211("enter\n");
7803
7804 mutex_lock(&priv->mutex);
948c171c 7805
fde3571f
MA
7806 if (iwl4965_is_ready_rf(priv)) {
7807 iwl4965_scan_cancel_timeout(priv, 100);
7808 cancel_delayed_work(&priv->post_associate);
7809 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
7810 iwl4965_commit_rxon(priv);
7811 }
32bfd35d
JB
7812 if (priv->vif == conf->vif) {
7813 priv->vif = NULL;
b481de9c
ZY
7814 memset(priv->bssid, 0, ETH_ALEN);
7815 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
7816 priv->essid_len = 0;
7817 }
7818 mutex_unlock(&priv->mutex);
7819
7820 IWL_DEBUG_MAC80211("leave\n");
7821
7822}
471b3efd
JB
7823
7824static void iwl4965_bss_info_changed(struct ieee80211_hw *hw,
7825 struct ieee80211_vif *vif,
7826 struct ieee80211_bss_conf *bss_conf,
7827 u32 changes)
220173b0 7828{
bb8c093b 7829 struct iwl4965_priv *priv = hw->priv;
220173b0 7830
471b3efd
JB
7831 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
7832 if (bss_conf->use_short_preamble)
220173b0
TW
7833 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
7834 else
7835 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
7836 }
7837
471b3efd
JB
7838 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
7839 if (bss_conf->use_cts_prot && (priv->phymode != MODE_IEEE80211A))
220173b0
TW
7840 priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
7841 else
7842 priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
7843 }
7844
471b3efd
JB
7845 if (changes & BSS_CHANGED_ASSOC) {
7846 /*
7847 * TODO:
7848 * do stuff instead of sniffing assoc resp
7849 */
7850 }
7851
bb8c093b
CH
7852 if (iwl4965_is_associated(priv))
7853 iwl4965_send_rxon_assoc(priv);
220173b0 7854}
b481de9c 7855
bb8c093b 7856static int iwl4965_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
b481de9c
ZY
7857{
7858 int rc = 0;
7859 unsigned long flags;
bb8c093b 7860 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7861
7862 IWL_DEBUG_MAC80211("enter\n");
7863
052c4b9f 7864 mutex_lock(&priv->mutex);
b481de9c
ZY
7865 spin_lock_irqsave(&priv->lock, flags);
7866
bb8c093b 7867 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
7868 rc = -EIO;
7869 IWL_DEBUG_MAC80211("leave - not ready or exit pending\n");
7870 goto out_unlock;
7871 }
7872
7873 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { /* APs don't scan */
7874 rc = -EIO;
7875 IWL_ERROR("ERROR: APs don't scan\n");
7876 goto out_unlock;
7877 }
7878
7878a5a4
MA
7879 /* we don't schedule scan within next_scan_jiffies period */
7880 if (priv->next_scan_jiffies &&
7881 time_after(priv->next_scan_jiffies, jiffies)) {
7882 rc = -EAGAIN;
7883 goto out_unlock;
7884 }
b481de9c 7885 /* if we just finished a scan, ask for a delay */
7878a5a4
MA
7886 if (priv->last_scan_jiffies && time_after(priv->last_scan_jiffies +
7887 IWL_DELAY_NEXT_SCAN, jiffies)) {
b481de9c
ZY
7888 rc = -EAGAIN;
7889 goto out_unlock;
7890 }
7891 if (len) {
7878a5a4 7892 IWL_DEBUG_SCAN("direct scan for %s [%d]\n ",
bb8c093b 7893 iwl4965_escape_essid(ssid, len), (int)len);
b481de9c
ZY
7894
7895 priv->one_direct_scan = 1;
7896 priv->direct_ssid_len = (u8)
7897 min((u8) len, (u8) IW_ESSID_MAX_SIZE);
7898 memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
948c171c
MA
7899 } else
7900 priv->one_direct_scan = 0;
b481de9c 7901
bb8c093b 7902 rc = iwl4965_scan_initiate(priv);
b481de9c
ZY
7903
7904 IWL_DEBUG_MAC80211("leave\n");
7905
7906out_unlock:
7907 spin_unlock_irqrestore(&priv->lock, flags);
052c4b9f 7908 mutex_unlock(&priv->mutex);
b481de9c
ZY
7909
7910 return rc;
7911}
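
The two jiffies checks above throttle scans: a request is refused with -EAGAIN while "now" is still before next_scan_jiffies, or within IWL_DELAY_NEXT_SCAN (2*HZ) of the last scan. Below is a standalone sketch of the second check; time_after() is re-implemented with the kernel's signed-subtraction trick so the sketch builds in userspace, and the HZ and timestamp values are invented.

#include <stdio.h>

typedef unsigned long jiffies_t;

/* Wrap-safe "a is after b", written the way the kernel macro works. */
static int time_after(jiffies_t a, jiffies_t b)
{
    return (long)(b - a) < 0;
}

int main(void)
{
    jiffies_t hz = 250;                 /* invented HZ */
    jiffies_t now = 1000;
    jiffies_t last_scan = 900;
    jiffies_t delay = 2 * hz;           /* IWL_DELAY_NEXT_SCAN = HZ * 2 */

    if (time_after(last_scan + delay, now))
        printf("refuse scan (-EAGAIN): only %lu ticks since last scan\n",
               now - last_scan);
    else
        printf("allow scan\n");
    return 0;
}
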
7912
bb8c093b 7913static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
b481de9c
ZY
7914 const u8 *local_addr, const u8 *addr,
7915 struct ieee80211_key_conf *key)
7916{
bb8c093b 7917 struct iwl4965_priv *priv = hw->priv;
0795af57 7918 DECLARE_MAC_BUF(mac);
b481de9c
ZY
7919 int rc = 0;
7920 u8 sta_id;
7921
7922 IWL_DEBUG_MAC80211("enter\n");
7923
bb8c093b 7924 if (!iwl4965_param_hwcrypto) {
b481de9c
ZY
7925 IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n");
7926 return -EOPNOTSUPP;
7927 }
7928
7929 if (is_zero_ether_addr(addr))
7930 /* only support pairwise keys */
7931 return -EOPNOTSUPP;
7932
bb8c093b 7933 sta_id = iwl4965_hw_find_station(priv, addr);
b481de9c 7934 if (sta_id == IWL_INVALID_STATION) {
0795af57
JP
7935 IWL_DEBUG_MAC80211("leave - %s not in station map.\n",
7936 print_mac(mac, addr));
b481de9c
ZY
7937 return -EINVAL;
7938 }
7939
7940 mutex_lock(&priv->mutex);
7941
bb8c093b 7942 iwl4965_scan_cancel_timeout(priv, 100);
052c4b9f 7943
b481de9c
ZY
7944 switch (cmd) {
7945 case SET_KEY:
bb8c093b 7946 rc = iwl4965_update_sta_key_info(priv, key, sta_id);
b481de9c 7947 if (!rc) {
bb8c093b
CH
7948 iwl4965_set_rxon_hwcrypto(priv, 1);
7949 iwl4965_commit_rxon(priv);
b481de9c
ZY
7950 key->hw_key_idx = sta_id;
7951 IWL_DEBUG_MAC80211("set_key success, using hwcrypto\n");
7952 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
7953 }
7954 break;
7955 case DISABLE_KEY:
bb8c093b 7956 rc = iwl4965_clear_sta_key_info(priv, sta_id);
b481de9c 7957 if (!rc) {
bb8c093b
CH
7958 iwl4965_set_rxon_hwcrypto(priv, 0);
7959 iwl4965_commit_rxon(priv);
b481de9c
ZY
7960 IWL_DEBUG_MAC80211("disable hwcrypto key\n");
7961 }
7962 break;
7963 default:
7964 rc = -EINVAL;
7965 }
7966
7967 IWL_DEBUG_MAC80211("leave\n");
7968 mutex_unlock(&priv->mutex);
7969
7970 return rc;
7971}
7972
bb8c093b 7973static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, int queue,
b481de9c
ZY
7974 const struct ieee80211_tx_queue_params *params)
7975{
bb8c093b 7976 struct iwl4965_priv *priv = hw->priv;
c8b0e6e1 7977#ifdef CONFIG_IWL4965_QOS
b481de9c
ZY
7978 unsigned long flags;
7979 int q;
0054b34d 7980#endif /* CONFIG_IWL4965_QOS */
b481de9c
ZY
7981
7982 IWL_DEBUG_MAC80211("enter\n");
7983
bb8c093b 7984 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
7985 IWL_DEBUG_MAC80211("leave - RF not ready\n");
7986 return -EIO;
7987 }
7988
7989 if (queue >= AC_NUM) {
7990 IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue);
7991 return 0;
7992 }
7993
c8b0e6e1 7994#ifdef CONFIG_IWL4965_QOS
b481de9c
ZY
7995 if (!priv->qos_data.qos_enable) {
7996 priv->qos_data.qos_active = 0;
7997 IWL_DEBUG_MAC80211("leave - qos not enabled\n");
7998 return 0;
7999 }
8000 q = AC_NUM - 1 - queue;
8001
8002 spin_lock_irqsave(&priv->lock, flags);
8003
8004 priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
8005 priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
8006 priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
8007 priv->qos_data.def_qos_parm.ac[q].edca_txop =
8008 cpu_to_le16((params->burst_time * 100));
8009
8010 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
8011 priv->qos_data.qos_active = 1;
8012
8013 spin_unlock_irqrestore(&priv->lock, flags);
8014
8015 mutex_lock(&priv->mutex);
8016 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
bb8c093b
CH
8017 iwl4965_activate_qos(priv, 1);
8018 else if (priv->assoc_id && iwl4965_is_associated(priv))
8019 iwl4965_activate_qos(priv, 0);
b481de9c
ZY
8020
8021 mutex_unlock(&priv->mutex);
8022
c8b0e6e1 8023#endif /*CONFIG_IWL4965_QOS */
b481de9c
ZY
8024
8025 IWL_DEBUG_MAC80211("leave\n");
8026 return 0;
8027}
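
The "q = AC_NUM - 1 - queue" line above is the whole mapping between mac80211's TX queue index and the slot written in the uCode EDCA parameter table: the two sides number the four access categories in opposite orders, so the index is simply reversed. A standalone sketch of the mapping; AC_NUM = 4 is assumed here (it is defined elsewhere in the driver):

#include <stdio.h>

#define AC_NUM 4    /* assumed: four EDCA access categories */

int main(void)
{
    int queue;

    for (queue = 0; queue < AC_NUM; queue++)
        printf("mac80211 queue %d -> def_qos_parm.ac[%d]\n",
               queue, AC_NUM - 1 - queue);
    return 0;
}
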
8028
bb8c093b 8029static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw,
b481de9c
ZY
8030 struct ieee80211_tx_queue_stats *stats)
8031{
bb8c093b 8032 struct iwl4965_priv *priv = hw->priv;
b481de9c 8033 int i, avail;
bb8c093b
CH
8034 struct iwl4965_tx_queue *txq;
8035 struct iwl4965_queue *q;
b481de9c
ZY
8036 unsigned long flags;
8037
8038 IWL_DEBUG_MAC80211("enter\n");
8039
bb8c093b 8040 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
8041 IWL_DEBUG_MAC80211("leave - RF not ready\n");
8042 return -EIO;
8043 }
8044
8045 spin_lock_irqsave(&priv->lock, flags);
8046
8047 for (i = 0; i < AC_NUM; i++) {
8048 txq = &priv->txq[i];
8049 q = &txq->q;
bb8c093b 8050 avail = iwl4965_queue_space(q);
b481de9c
ZY
8051
8052 stats->data[i].len = q->n_window - avail;
8053 stats->data[i].limit = q->n_window - q->high_mark;
8054 stats->data[i].count = q->n_window;
8055
8056 }
8057 spin_unlock_irqrestore(&priv->lock, flags);
8058
8059 IWL_DEBUG_MAC80211("leave\n");
8060
8061 return 0;
8062}
8063
bb8c093b 8064static int iwl4965_mac_get_stats(struct ieee80211_hw *hw,
b481de9c
ZY
8065 struct ieee80211_low_level_stats *stats)
8066{
8067 IWL_DEBUG_MAC80211("enter\n");
8068 IWL_DEBUG_MAC80211("leave\n");
8069
8070 return 0;
8071}
8072
bb8c093b 8073static u64 iwl4965_mac_get_tsf(struct ieee80211_hw *hw)
b481de9c
ZY
8074{
8075 IWL_DEBUG_MAC80211("enter\n");
8076 IWL_DEBUG_MAC80211("leave\n");
8077
8078 return 0;
8079}
8080
bb8c093b 8081static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
b481de9c 8082{
bb8c093b 8083 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
8084 unsigned long flags;
8085
8086 mutex_lock(&priv->mutex);
8087 IWL_DEBUG_MAC80211("enter\n");
8088
8089 priv->lq_mngr.lq_ready = 0;
c8b0e6e1 8090#ifdef CONFIG_IWL4965_HT
b481de9c 8091 spin_lock_irqsave(&priv->lock, flags);
fd105e79 8092 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_info));
b481de9c 8093 spin_unlock_irqrestore(&priv->lock, flags);
c8b0e6e1 8094#ifdef CONFIG_IWL4965_HT_AGG
b481de9c
ZY
8095/* if (priv->lq_mngr.agg_ctrl.granted_ba)
8096 iwl4965_turn_off_agg(priv, TID_ALL_SPECIFIED);*/
8097
bb8c093b 8098 memset(&(priv->lq_mngr.agg_ctrl), 0, sizeof(struct iwl4965_agg_control));
b481de9c
ZY
8099 priv->lq_mngr.agg_ctrl.tid_traffic_load_threshold = 10;
8100 priv->lq_mngr.agg_ctrl.ba_timeout = 5000;
8101 priv->lq_mngr.agg_ctrl.auto_agg = 1;
8102
8103 if (priv->lq_mngr.agg_ctrl.auto_agg)
8104 priv->lq_mngr.agg_ctrl.requested_ba = TID_ALL_ENABLED;
c8b0e6e1
CH
8105#endif /*CONFIG_IWL4965_HT_AGG */
8106#endif /* CONFIG_IWL4965_HT */
b481de9c 8107
c8b0e6e1 8108#ifdef CONFIG_IWL4965_QOS
bb8c093b 8109 iwl4965_reset_qos(priv);
b481de9c
ZY
8110#endif
8111
8112 cancel_delayed_work(&priv->post_associate);
8113
8114 spin_lock_irqsave(&priv->lock, flags);
8115 priv->assoc_id = 0;
8116 priv->assoc_capability = 0;
8117 priv->call_post_assoc_from_beacon = 0;
8118 priv->assoc_station_added = 0;
8119
8120 /* new association get rid of ibss beacon skb */
8121 if (priv->ibss_beacon)
8122 dev_kfree_skb(priv->ibss_beacon);
8123
8124 priv->ibss_beacon = NULL;
8125
8126 priv->beacon_int = priv->hw->conf.beacon_int;
8127 priv->timestamp1 = 0;
8128 priv->timestamp0 = 0;
8129 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA))
8130 priv->beacon_int = 0;
8131
8132 spin_unlock_irqrestore(&priv->lock, flags);
8133
fde3571f
MA
8134 if (!iwl4965_is_ready_rf(priv)) {
8135 IWL_DEBUG_MAC80211("leave - not ready\n");
8136 mutex_unlock(&priv->mutex);
8137 return;
8138 }
8139
052c4b9f 8140 /* we are restarting association process
8141 * clear RXON_FILTER_ASSOC_MSK bit
8142 */
8143 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
bb8c093b 8144 iwl4965_scan_cancel_timeout(priv, 100);
052c4b9f 8145 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 8146 iwl4965_commit_rxon(priv);
052c4b9f 8147 }
8148
b481de9c
ZY
8149 /* Per mac80211.h: This is only used in IBSS mode... */
8150 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
052c4b9f 8151
b481de9c
ZY
8152 IWL_DEBUG_MAC80211("leave - not in IBSS\n");
8153 mutex_unlock(&priv->mutex);
8154 return;
8155 }
8156
b481de9c
ZY
8157 priv->only_active_channel = 0;
8158
bb8c093b 8159 iwl4965_set_rate(priv);
b481de9c
ZY
8160
8161 mutex_unlock(&priv->mutex);
8162
8163 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
8164}
8165
bb8c093b 8166static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
b481de9c
ZY
8167 struct ieee80211_tx_control *control)
8168{
bb8c093b 8169 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
8170 unsigned long flags;
8171
8172 mutex_lock(&priv->mutex);
8173 IWL_DEBUG_MAC80211("enter\n");
8174
bb8c093b 8175 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
8176 IWL_DEBUG_MAC80211("leave - RF not ready\n");
8177 mutex_unlock(&priv->mutex);
8178 return -EIO;
8179 }
8180
8181 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
8182 IWL_DEBUG_MAC80211("leave - not IBSS\n");
8183 mutex_unlock(&priv->mutex);
8184 return -EIO;
8185 }
8186
8187 spin_lock_irqsave(&priv->lock, flags);
8188
8189 if (priv->ibss_beacon)
8190 dev_kfree_skb(priv->ibss_beacon);
8191
8192 priv->ibss_beacon = skb;
8193
8194 priv->assoc_id = 0;
8195
8196 IWL_DEBUG_MAC80211("leave\n");
8197 spin_unlock_irqrestore(&priv->lock, flags);
8198
c8b0e6e1 8199#ifdef CONFIG_IWL4965_QOS
bb8c093b 8200 iwl4965_reset_qos(priv);
b481de9c
ZY
8201#endif
8202
8203 queue_work(priv->workqueue, &priv->post_associate.work);
8204
8205 mutex_unlock(&priv->mutex);
8206
8207 return 0;
8208}
8209
c8b0e6e1 8210#ifdef CONFIG_IWL4965_HT
b481de9c 8211
fd105e79
RR
8212static void iwl4965_ht_info_fill(struct ieee80211_conf *conf,
8213 struct iwl4965_priv *priv)
b481de9c 8214{
fd105e79
RR
8215 struct iwl_ht_info *iwl_conf = &priv->current_ht_config;
8216 struct ieee80211_ht_info *ht_conf = &conf->ht_conf;
8217 struct ieee80211_ht_bss_info *ht_bss_conf = &conf->ht_bss_conf;
b481de9c
ZY
8218
8219 IWL_DEBUG_MAC80211("enter: \n");
8220
fd105e79
RR
8221 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE)) {
8222 iwl_conf->is_ht = 0;
8223 return;
b481de9c
ZY
8224 }
8225
fd105e79
RR
8226 iwl_conf->is_ht = 1;
8227 priv->ps_mode = (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
8228
8229 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_20)
8230 iwl_conf->sgf |= 0x1;
8231 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_40)
8232 iwl_conf->sgf |= 0x2;
8233
8234 iwl_conf->is_green_field = !!(ht_conf->cap & IEEE80211_HT_CAP_GRN_FLD);
8235 iwl_conf->max_amsdu_size =
8236 !!(ht_conf->cap & IEEE80211_HT_CAP_MAX_AMSDU);
8237 iwl_conf->supported_chan_width =
8238 !!(ht_conf->cap & IEEE80211_HT_CAP_SUP_WIDTH);
8239 iwl_conf->tx_mimo_ps_mode =
8240 (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
8241 memcpy(iwl_conf->supp_mcs_set, ht_conf->supp_mcs_set, 16);
8242
8243 iwl_conf->control_channel = ht_bss_conf->primary_channel;
8244 iwl_conf->extension_chan_offset =
8245 ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_SEC_OFFSET;
8246 iwl_conf->tx_chan_width =
8247 !!(ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_WIDTH);
8248 iwl_conf->ht_protection =
8249 ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_HT_PROTECTION;
8250 iwl_conf->non_GF_STA_present =
8251 !!(ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_NON_GF_STA_PRSNT);
8252
8253 IWL_DEBUG_MAC80211("control channel %d\n",
8254 iwl_conf->control_channel);
b481de9c 8255 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
8256}
8257
bb8c093b 8258static int iwl4965_mac_conf_ht(struct ieee80211_hw *hw,
fd105e79 8259 struct ieee80211_conf *conf)
b481de9c 8260{
bb8c093b 8261 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
8262
8263 IWL_DEBUG_MAC80211("enter: \n");
8264
fd105e79 8265 iwl4965_ht_info_fill(conf, priv);
b481de9c
ZY
8266 iwl4965_set_rxon_chain(priv);
8267
8268 if (priv && priv->assoc_id &&
8269 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
8270 unsigned long flags;
8271
8272 spin_lock_irqsave(&priv->lock, flags);
8273 if (priv->beacon_int)
8274 queue_work(priv->workqueue, &priv->post_associate.work);
8275 else
8276 priv->call_post_assoc_from_beacon = 1;
8277 spin_unlock_irqrestore(&priv->lock, flags);
8278 }
8279
fd105e79
RR
8280 IWL_DEBUG_MAC80211("leave:\n");
8281 return 0;
b481de9c
ZY
8282}
8283
bb8c093b 8284static void iwl4965_set_ht_capab(struct ieee80211_hw *hw,
8fb88032
RR
8285 struct ieee80211_ht_cap *ht_cap,
8286 u8 use_current_config)
b481de9c 8287{
8fb88032
RR
8288 struct ieee80211_conf *conf = &hw->conf;
8289 struct ieee80211_hw_mode *mode = conf->mode;
b481de9c 8290
8fb88032
RR
8291 if (use_current_config) {
8292 ht_cap->cap_info = cpu_to_le16(conf->ht_conf.cap);
8293 memcpy(ht_cap->supp_mcs_set,
8294 conf->ht_conf.supp_mcs_set, 16);
8295 } else {
8296 ht_cap->cap_info = cpu_to_le16(mode->ht_info.cap);
8297 memcpy(ht_cap->supp_mcs_set,
8298 mode->ht_info.supp_mcs_set, 16);
8299 }
8300 ht_cap->ampdu_params_info =
8301 (mode->ht_info.ampdu_factor & IEEE80211_HT_CAP_AMPDU_FACTOR) |
8302 ((mode->ht_info.ampdu_density << 2) &
8303 IEEE80211_HT_CAP_AMPDU_DENSITY);
b481de9c
ZY
8304}
8305
c8b0e6e1 8306#endif /*CONFIG_IWL4965_HT*/
b481de9c
ZY
8307
8308/*****************************************************************************
8309 *
8310 * sysfs attributes
8311 *
8312 *****************************************************************************/
8313
c8b0e6e1 8314#ifdef CONFIG_IWL4965_DEBUG
b481de9c
ZY
8315
8316/*
8317 * The following adds a new attribute to the sysfs representation
8318 * of this device driver (i.e. a new file under /sys/bus/pci/drivers/<DRV_NAME>/)
8319 * used for controlling the debug level.
8320 *
8321 * See the debug level definitions in iwl-debug.h for details.
8322 */
8323
8324static ssize_t show_debug_level(struct device_driver *d, char *buf)
8325{
bb8c093b 8326 return sprintf(buf, "0x%08X\n", iwl4965_debug_level);
b481de9c
ZY
8327}
8328static ssize_t store_debug_level(struct device_driver *d,
8329 const char *buf, size_t count)
8330{
8331 char *p = (char *)buf;
8332 u32 val;
8333
8334 val = simple_strtoul(p, &p, 0);
8335 if (p == buf)
8336 printk(KERN_INFO DRV_NAME
8337 ": %s is not in hex or decimal form.\n", buf);
8338 else
bb8c093b 8339 iwl4965_debug_level = val;
b481de9c
ZY
8340
8341 return strnlen(buf, count);
8342}
8343
8344static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
8345 show_debug_level, store_debug_level);
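/*
 * Illustrative usage, not part of the driver: since this is a DRIVER_ATTR,
 * the file appears in the driver's sysfs directory (typically
 * /sys/bus/pci/drivers/<DRV_NAME>/debug_level).  The value is parsed with
 * simple_strtoul(..., 0), so hex and decimal both work, e.g.:
 *
 *   cat /sys/bus/pci/drivers/<DRV_NAME>/debug_level
 *   echo 0x47 > /sys/bus/pci/drivers/<DRV_NAME>/debug_level
 */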
8346
c8b0e6e1 8347#endif /* CONFIG_IWL4965_DEBUG */
b481de9c
ZY
8348
8349static ssize_t show_rf_kill(struct device *d,
8350 struct device_attribute *attr, char *buf)
8351{
8352 /*
8353 * 0 - RF kill not enabled
8354 * 1 - SW based RF kill active (sysfs)
8355 * 2 - HW based RF kill active
8356 * 3 - Both HW and SW based RF kill active
8357 */
bb8c093b 8358 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8359 int val = (test_bit(STATUS_RF_KILL_SW, &priv->status) ? 0x1 : 0x0) |
8360 (test_bit(STATUS_RF_KILL_HW, &priv->status) ? 0x2 : 0x0);
8361
8362 return sprintf(buf, "%i\n", val);
8363}
8364
8365static ssize_t store_rf_kill(struct device *d,
8366 struct device_attribute *attr,
8367 const char *buf, size_t count)
8368{
bb8c093b 8369 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8370
8371 mutex_lock(&priv->mutex);
bb8c093b 8372 iwl4965_radio_kill_sw(priv, buf[0] == '1');
b481de9c
ZY
8373 mutex_unlock(&priv->mutex);
8374
8375 return count;
8376}
8377
8378static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
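/*
 * Illustrative usage, not part of the driver: rf_kill is registered through
 * iwl4965_attribute_group, so it appears in the PCI device's sysfs
 * directory.  Reading returns the 0-3 bitmask documented above; writing
 * compares only the first character against '1':
 *
 *   echo 1 > rf_kill    # request software RF kill (radio off)
 *   echo 0 > rf_kill    # clear software RF kill
 */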
8379
8380static ssize_t show_temperature(struct device *d,
8381 struct device_attribute *attr, char *buf)
8382{
bb8c093b 8383 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c 8384
bb8c093b 8385 if (!iwl4965_is_alive(priv))
b481de9c
ZY
8386 return -EAGAIN;
8387
bb8c093b 8388 return sprintf(buf, "%d\n", iwl4965_hw_get_temperature(priv));
b481de9c
ZY
8389}
8390
8391static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
8392
8393static ssize_t show_rs_window(struct device *d,
8394 struct device_attribute *attr,
8395 char *buf)
8396{
bb8c093b
CH
8397 struct iwl4965_priv *priv = d->driver_data;
8398 return iwl4965_fill_rs_info(priv->hw, buf, IWL_AP_ID);
b481de9c
ZY
8399}
8400static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL);
8401
8402static ssize_t show_tx_power(struct device *d,
8403 struct device_attribute *attr, char *buf)
8404{
bb8c093b 8405 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8406 return sprintf(buf, "%d\n", priv->user_txpower_limit);
8407}
8408
8409static ssize_t store_tx_power(struct device *d,
8410 struct device_attribute *attr,
8411 const char *buf, size_t count)
8412{
bb8c093b 8413 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8414 char *p = (char *)buf;
8415 u32 val;
8416
8417 val = simple_strtoul(p, &p, 10);
8418 if (p == buf)
8419 printk(KERN_INFO DRV_NAME
8420 ": %s is not in decimal form.\n", buf);
8421 else
bb8c093b 8422 iwl4965_hw_reg_set_txpower(priv, val);
b481de9c
ZY
8423
8424 return count;
8425}
8426
8427static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
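/*
 * Illustrative usage, not part of the driver: store_tx_power() parses a
 * plain decimal number (base 10) and passes it to
 * iwl4965_hw_reg_set_txpower(); reads return priv->user_txpower_limit.
 * The unit is whatever the hardware-specific code expects (dBm is an
 * assumption here):
 *
 *   echo 14 > tx_power
 */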
8428
8429static ssize_t show_flags(struct device *d,
8430 struct device_attribute *attr, char *buf)
8431{
bb8c093b 8432 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8433
8434 return sprintf(buf, "0x%04X\n", priv->active_rxon.flags);
8435}
8436
8437static ssize_t store_flags(struct device *d,
8438 struct device_attribute *attr,
8439 const char *buf, size_t count)
8440{
bb8c093b 8441 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8442 u32 flags = simple_strtoul(buf, NULL, 0);
8443
8444 mutex_lock(&priv->mutex);
8445 if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
8446 /* Cancel any currently running scans... */
bb8c093b 8447 if (iwl4965_scan_cancel_timeout(priv, 100))
b481de9c
ZY
8448 IWL_WARNING("Could not cancel scan.\n");
8449 else {
8450 IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n",
8451 flags);
8452 priv->staging_rxon.flags = cpu_to_le32(flags);
bb8c093b 8453 iwl4965_commit_rxon(priv);
b481de9c
ZY
8454 }
8455 }
8456 mutex_unlock(&priv->mutex);
8457
8458 return count;
8459}
8460
8461static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
8462
8463static ssize_t show_filter_flags(struct device *d,
8464 struct device_attribute *attr, char *buf)
8465{
bb8c093b 8466 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8467
8468 return sprintf(buf, "0x%04X\n",
8469 le32_to_cpu(priv->active_rxon.filter_flags));
8470}
8471
8472static ssize_t store_filter_flags(struct device *d,
8473 struct device_attribute *attr,
8474 const char *buf, size_t count)
8475{
bb8c093b 8476 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8477 u32 filter_flags = simple_strtoul(buf, NULL, 0);
8478
8479 mutex_lock(&priv->mutex);
8480 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
8481 /* Cancel any currently running scans... */
bb8c093b 8482 if (iwl4965_scan_cancel_timeout(priv, 100))
b481de9c
ZY
8483 IWL_WARNING("Could not cancel scan.\n");
8484 else {
8485 IWL_DEBUG_INFO("Committing rxon.filter_flags = "
8486 "0x%04X\n", filter_flags);
8487 priv->staging_rxon.filter_flags =
8488 cpu_to_le32(filter_flags);
bb8c093b 8489 iwl4965_commit_rxon(priv);
b481de9c
ZY
8490 }
8491 }
8492 mutex_unlock(&priv->mutex);
8493
8494 return count;
8495}
8496
8497static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
8498 store_filter_flags);
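/*
 * Illustrative note, not part of the driver: both 'flags' and
 * 'filter_flags' take a raw RXON bitmask parsed by simple_strtoul(..., 0)
 * (hex or decimal).  A write only takes effect when it differs from the
 * staging RXON and any running scan can be cancelled within the
 * scan-cancel timeout, after which iwl4965_commit_rxon() pushes the new
 * value to the device.
 */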
8499
8500static ssize_t show_tune(struct device *d,
8501 struct device_attribute *attr, char *buf)
8502{
bb8c093b 8503 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8504
8505 return sprintf(buf, "0x%04X\n",
8506 (priv->phymode << 8) |
8507 le16_to_cpu(priv->active_rxon.channel));
8508}
8509
bb8c093b 8510static void iwl4965_set_flags_for_phymode(struct iwl4965_priv *priv, u8 phymode);
b481de9c
ZY
8511
8512static ssize_t store_tune(struct device *d,
8513 struct device_attribute *attr,
8514 const char *buf, size_t count)
8515{
bb8c093b 8516 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8517 char *p = (char *)buf;
8518 u16 tune = simple_strtoul(p, &p, 0);
8519 u8 phymode = (tune >> 8) & 0xff;
8520 u16 channel = tune & 0xff;
8521
8522 IWL_DEBUG_INFO("Tune request to:%d channel:%d\n", phymode, channel);
8523
8524 mutex_lock(&priv->mutex);
8525 if ((le16_to_cpu(priv->staging_rxon.channel) != channel) ||
8526 (priv->phymode != phymode)) {
bb8c093b 8527 const struct iwl4965_channel_info *ch_info;
b481de9c 8528
bb8c093b 8529 ch_info = iwl4965_get_channel_info(priv, phymode, channel);
b481de9c
ZY
8530 if (!ch_info) {
8531 IWL_WARNING("Requested invalid phymode/channel "
8532 "combination: %d %d\n", phymode, channel);
8533 mutex_unlock(&priv->mutex);
8534 return -EINVAL;
8535 }
8536
8537 /* Cancel any currently running scans... */
bb8c093b 8538 if (iwl4965_scan_cancel_timeout(priv, 100))
b481de9c
ZY
8539 IWL_WARNING("Could not cancel scan.\n");
8540 else {
8541 IWL_DEBUG_INFO("Committing phymode and "
8542 "rxon.channel = %d %d\n",
8543 phymode, channel);
8544
bb8c093b
CH
8545 iwl4965_set_rxon_channel(priv, phymode, channel);
8546 iwl4965_set_flags_for_phymode(priv, phymode);
b481de9c 8547
bb8c093b
CH
8548 iwl4965_set_rate(priv);
8549 iwl4965_commit_rxon(priv);
b481de9c
ZY
8550 }
8551 }
8552 mutex_unlock(&priv->mutex);
8553
8554 return count;
8555}
8556
8557static DEVICE_ATTR(tune, S_IWUSR | S_IRUGO, show_tune, store_tune);
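/*
 * Illustrative note, not part of the driver: the 'tune' value packs the
 * phymode into the high byte and the channel number into the low byte, so
 * for example 0x020b asks for phymode 2, channel 11 (the numeric phymode
 * values come from the MODE_IEEE80211* constants and are an assumption
 * here).  Reading returns the active (phymode << 8) | channel.
 */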
8558
c8b0e6e1 8559#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
b481de9c
ZY
8560
8561static ssize_t show_measurement(struct device *d,
8562 struct device_attribute *attr, char *buf)
8563{
bb8c093b
CH
8564 struct iwl4965_priv *priv = dev_get_drvdata(d);
8565 struct iwl4965_spectrum_notification measure_report;
b481de9c
ZY
8566 u32 size = sizeof(measure_report), len = 0, ofs = 0;
8567 u8 *data = (u8 *)&measure_report;
8568 unsigned long flags;
8569
8570 spin_lock_irqsave(&priv->lock, flags);
8571 if (!(priv->measurement_status & MEASUREMENT_READY)) {
8572 spin_unlock_irqrestore(&priv->lock, flags);
8573 return 0;
8574 }
8575 memcpy(&measure_report, &priv->measure_report, size);
8576 priv->measurement_status = 0;
8577 spin_unlock_irqrestore(&priv->lock, flags);
8578
8579 while (size && (PAGE_SIZE - len)) {
8580 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
8581 PAGE_SIZE - len, 1);
8582 len = strlen(buf);
8583 if (PAGE_SIZE - len)
8584 buf[len++] = '\n';
8585
8586 ofs += 16;
8587 size -= min(size, 16U);
8588 }
8589
8590 return len;
8591}
8592
8593static ssize_t store_measurement(struct device *d,
8594 struct device_attribute *attr,
8595 const char *buf, size_t count)
8596{
bb8c093b 8597 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8598 struct ieee80211_measurement_params params = {
8599 .channel = le16_to_cpu(priv->active_rxon.channel),
8600 .start_time = cpu_to_le64(priv->last_tsf),
8601 .duration = cpu_to_le16(1),
8602 };
8603 u8 type = IWL_MEASURE_BASIC;
8604 u8 buffer[32] = {0}; /* keep NUL-terminated for the parsing below */
8605 u8 channel;
8606
8607 if (count) {
8608 char *p = buffer;
8609 strncpy(buffer, buf, min(sizeof(buffer) - 1, count));
8610 channel = simple_strtoul(p, NULL, 0);
8611 if (channel)
8612 params.channel = channel;
8613
8614 p = buffer;
8615 while (*p && *p != ' ')
8616 p++;
8617 if (*p)
8618 type = simple_strtoul(p + 1, NULL, 0);
8619 }
8620
8621 IWL_DEBUG_INFO("Invoking measurement of type %d on "
8622 "channel %d (for '%s')\n", type, params.channel, buf);
bb8c093b 8623 iwl4965_get_measurement(priv, &params, type);
b481de9c
ZY
8624
8625 return count;
8626}
8627
8628static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
8629 show_measurement, store_measurement);
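/*
 * Illustrative usage, not part of the driver: the store side accepts
 * "<channel> <type>", both parsed with simple_strtoul().  A missing or
 * zero channel falls back to the active RXON channel, and the type
 * defaults to IWL_MEASURE_BASIC:
 *
 *   echo "6 0" > measurement
 */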
c8b0e6e1 8630#endif /* CONFIG_IWL4965_SPECTRUM_MEASUREMENT */
b481de9c
ZY
8631
8632static ssize_t store_retry_rate(struct device *d,
8633 struct device_attribute *attr,
8634 const char *buf, size_t count)
8635{
bb8c093b 8636 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8637
8638 priv->retry_rate = simple_strtoul(buf, NULL, 0);
8639 if (priv->retry_rate <= 0)
8640 priv->retry_rate = 1;
8641
8642 return count;
8643}
8644
8645static ssize_t show_retry_rate(struct device *d,
8646 struct device_attribute *attr, char *buf)
8647{
bb8c093b 8648 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8649 return sprintf(buf, "%d", priv->retry_rate);
8650}
8651
8652static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate,
8653 store_retry_rate);
8654
8655static ssize_t store_power_level(struct device *d,
8656 struct device_attribute *attr,
8657 const char *buf, size_t count)
8658{
bb8c093b 8659 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8660 int rc;
8661 int mode;
8662
8663 mode = simple_strtoul(buf, NULL, 0);
8664 mutex_lock(&priv->mutex);
8665
bb8c093b 8666 if (!iwl4965_is_ready(priv)) {
b481de9c
ZY
8667 rc = -EAGAIN;
8668 goto out;
8669 }
8670
8671 if ((mode < 1) || (mode > IWL_POWER_LIMIT) || (mode == IWL_POWER_AC))
8672 mode = IWL_POWER_AC;
8673 else
8674 mode |= IWL_POWER_ENABLED;
8675
8676 if (mode != priv->power_mode) {
bb8c093b 8677 rc = iwl4965_send_power_mode(priv, IWL_POWER_LEVEL(mode));
b481de9c
ZY
8678 if (rc) {
8679 IWL_DEBUG_MAC80211("failed setting power mode.\n");
8680 goto out;
8681 }
8682 priv->power_mode = mode;
8683 }
8684
8685 rc = count;
8686
8687 out:
8688 mutex_unlock(&priv->mutex);
8689 return rc;
8690}
8691
8692#define MAX_WX_STRING 80
8693
8694/* Values are in microsecond */
8695static const s32 timeout_duration[] = {
8696 350000,
8697 250000,
8698 75000,
8699 37000,
8700 25000,
8701};
8702static const s32 period_duration[] = {
8703 400000,
8704 700000,
8705 1000000,
8706 1000000,
8707 1000000
8708};
8709
8710static ssize_t show_power_level(struct device *d,
8711 struct device_attribute *attr, char *buf)
8712{
bb8c093b 8713 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8714 int level = IWL_POWER_LEVEL(priv->power_mode);
8715 char *p = buf;
8716
8717 p += sprintf(p, "%d ", level);
8718 switch (level) {
8719 case IWL_POWER_MODE_CAM:
8720 case IWL_POWER_AC:
8721 p += sprintf(p, "(AC)");
8722 break;
8723 case IWL_POWER_BATTERY:
8724 p += sprintf(p, "(BATTERY)");
8725 break;
8726 default:
8727 p += sprintf(p,
8728 "(Timeout %dms, Period %dms)",
8729 timeout_duration[level - 1] / 1000,
8730 period_duration[level - 1] / 1000);
8731 }
8732
8733 if (!(priv->power_mode & IWL_POWER_ENABLED))
8734 p += sprintf(p, " OFF\n");
8735 else
8736 p += sprintf(p, " \n");
8737
8738 return (p - buf + 1);
8739
8740}
8741
8742static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
8743 store_power_level);
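/*
 * Illustrative note, not part of the driver: store_power_level() accepts an
 * integer mode; values outside 1..IWL_POWER_LIMIT (or IWL_POWER_AC itself)
 * are mapped back to IWL_POWER_AC, everything else gets IWL_POWER_ENABLED
 * or'ed in before iwl4965_send_power_mode() is called.  show_power_level()
 * prints the level followed by "(AC)", "(BATTERY)" or the timeout/period
 * pair from the tables above.
 */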
8744
8745static ssize_t show_channels(struct device *d,
8746 struct device_attribute *attr, char *buf)
8747{
bb8c093b 8748 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8749 int len = 0, i;
8750 struct ieee80211_channel *channels = NULL;
8751 const struct ieee80211_hw_mode *hw_mode = NULL;
8752 int count = 0;
8753
bb8c093b 8754 if (!iwl4965_is_ready(priv))
b481de9c
ZY
8755 return -EAGAIN;
8756
bb8c093b 8757 hw_mode = iwl4965_get_hw_mode(priv, MODE_IEEE80211G);
b481de9c 8758 if (!hw_mode)
bb8c093b 8759 hw_mode = iwl4965_get_hw_mode(priv, MODE_IEEE80211B);
b481de9c
ZY
8760 if (hw_mode) {
8761 channels = hw_mode->channels;
8762 count = hw_mode->num_channels;
8763 }
8764
8765 len +=
8766 sprintf(&buf[len],
8767 "Displaying %d channels in 2.4GHz band "
8768 "(802.11bg):\n", count);
8769
8770 for (i = 0; i < count; i++)
8771 len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
8772 channels[i].chan,
8773 channels[i].power_level,
8774 channels[i].
8775 flag & IEEE80211_CHAN_W_RADAR_DETECT ?
8776 " (IEEE 802.11h required)" : "",
8777 (!(channels[i].flag & IEEE80211_CHAN_W_IBSS)
8778 || (channels[i].
8779 flag &
8780 IEEE80211_CHAN_W_RADAR_DETECT)) ? "" :
8781 ", IBSS",
8782 channels[i].
8783 flag & IEEE80211_CHAN_W_ACTIVE_SCAN ?
8784 "active/passive" : "passive only");
8785
bb8c093b 8786 hw_mode = iwl4965_get_hw_mode(priv, MODE_IEEE80211A);
b481de9c
ZY
8787 if (hw_mode) {
8788 channels = hw_mode->channels;
8789 count = hw_mode->num_channels;
8790 } else {
8791 channels = NULL;
8792 count = 0;
8793 }
8794
8795 len += sprintf(&buf[len], "Displaying %d channels in 5.2GHz band "
8796 "(802.11a):\n", count);
8797
8798 for (i = 0; i < count; i++)
8799 len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
8800 channels[i].chan,
8801 channels[i].power_level,
8802 channels[i].
8803 flag & IEEE80211_CHAN_W_RADAR_DETECT ?
8804 " (IEEE 802.11h required)" : "",
8805 (!(channels[i].flag & IEEE80211_CHAN_W_IBSS)
8806 || (channels[i].
8807 flag &
8808 IEEE80211_CHAN_W_RADAR_DETECT)) ? "" :
8809 ", IBSS",
8810 channels[i].
8811 flag & IEEE80211_CHAN_W_ACTIVE_SCAN ?
8812 "active/passive" : "passive only");
8813
8814 return len;
8815}
8816
8817static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
8818
8819static ssize_t show_statistics(struct device *d,
8820 struct device_attribute *attr, char *buf)
8821{
bb8c093b
CH
8822 struct iwl4965_priv *priv = dev_get_drvdata(d);
8823 u32 size = sizeof(struct iwl4965_notif_statistics);
b481de9c
ZY
8824 u32 len = 0, ofs = 0;
8825 u8 *data = (u8 *)&priv->statistics;
8826 int rc = 0;
8827
bb8c093b 8828 if (!iwl4965_is_alive(priv))
b481de9c
ZY
8829 return -EAGAIN;
8830
8831 mutex_lock(&priv->mutex);
bb8c093b 8832 rc = iwl4965_send_statistics_request(priv);
b481de9c
ZY
8833 mutex_unlock(&priv->mutex);
8834
8835 if (rc) {
8836 len = sprintf(buf,
8837 "Error sending statistics request: 0x%08X\n", rc);
8838 return len;
8839 }
8840
8841 while (size && (PAGE_SIZE - len)) {
8842 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
8843 PAGE_SIZE - len, 1);
8844 len = strlen(buf);
8845 if (PAGE_SIZE - len)
8846 buf[len++] = '\n';
8847
8848 ofs += 16;
8849 size -= min(size, 16U);
8850 }
8851
8852 return len;
8853}
8854
8855static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
8856
8857static ssize_t show_antenna(struct device *d,
8858 struct device_attribute *attr, char *buf)
8859{
bb8c093b 8860 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c 8861
bb8c093b 8862 if (!iwl4965_is_alive(priv))
b481de9c
ZY
8863 return -EAGAIN;
8864
8865 return sprintf(buf, "%d\n", priv->antenna);
8866}
8867
8868static ssize_t store_antenna(struct device *d,
8869 struct device_attribute *attr,
8870 const char *buf, size_t count)
8871{
8872 int ant;
bb8c093b 8873 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8874
8875 if (count == 0)
8876 return 0;
8877
8878 if (sscanf(buf, "%1i", &ant) != 1) {
8879 IWL_DEBUG_INFO("not in hex or decimal form.\n");
8880 return count;
8881 }
8882
8883 if ((ant >= 0) && (ant <= 2)) {
8884 IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant);
bb8c093b 8885 priv->antenna = (enum iwl4965_antenna)ant;
b481de9c
ZY
8886 } else
8887 IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant);
8888
8889
8890 return count;
8891}
8892
8893static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
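/*
 * Illustrative usage, not part of the driver: accepted values are 0, 1 and
 * 2, matching the 'antenna' module parameter below (0 = both/diversity,
 * 1 = main, 2 = aux):
 *
 *   echo 1 > antenna
 */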
8894
8895static ssize_t show_status(struct device *d,
8896 struct device_attribute *attr, char *buf)
8897{
bb8c093b
CH
8898 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8899 if (!iwl4965_is_alive(priv))
b481de9c
ZY
8900 return -EAGAIN;
8901 return sprintf(buf, "0x%08x\n", (int)priv->status);
8902}
8903
8904static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
8905
8906static ssize_t dump_error_log(struct device *d,
8907 struct device_attribute *attr,
8908 const char *buf, size_t count)
8909{
8910 char *p = (char *)buf;
8911
8912 if (p[0] == '1')
bb8c093b 8913 iwl4965_dump_nic_error_log((struct iwl4965_priv *)d->driver_data);
b481de9c
ZY
8914
8915 return strnlen(buf, count);
8916}
8917
8918static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
8919
8920static ssize_t dump_event_log(struct device *d,
8921 struct device_attribute *attr,
8922 const char *buf, size_t count)
8923{
8924 char *p = (char *)buf;
8925
8926 if (p[0] == '1')
bb8c093b 8927 iwl4965_dump_nic_event_log((struct iwl4965_priv *)d->driver_data);
b481de9c
ZY
8928
8929 return strnlen(buf, count);
8930}
8931
8932static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
8933
8934/*****************************************************************************
8935 *
8936 * driver setup and teardown
8937 *
8938 *****************************************************************************/
8939
bb8c093b 8940static void iwl4965_setup_deferred_work(struct iwl4965_priv *priv)
b481de9c
ZY
8941{
8942 priv->workqueue = create_workqueue(DRV_NAME);
8943
8944 init_waitqueue_head(&priv->wait_command_queue);
8945
bb8c093b
CH
8946 INIT_WORK(&priv->up, iwl4965_bg_up);
8947 INIT_WORK(&priv->restart, iwl4965_bg_restart);
8948 INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
8949 INIT_WORK(&priv->scan_completed, iwl4965_bg_scan_completed);
8950 INIT_WORK(&priv->request_scan, iwl4965_bg_request_scan);
8951 INIT_WORK(&priv->abort_scan, iwl4965_bg_abort_scan);
8952 INIT_WORK(&priv->rf_kill, iwl4965_bg_rf_kill);
8953 INIT_WORK(&priv->beacon_update, iwl4965_bg_beacon_update);
8954 INIT_DELAYED_WORK(&priv->post_associate, iwl4965_bg_post_associate);
8955 INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
8956 INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);
8957 INIT_DELAYED_WORK(&priv->scan_check, iwl4965_bg_scan_check);
8958
8959 iwl4965_hw_setup_deferred_work(priv);
b481de9c
ZY
8960
8961 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
bb8c093b 8962 iwl4965_irq_tasklet, (unsigned long)priv);
b481de9c
ZY
8963}
8964
bb8c093b 8965static void iwl4965_cancel_deferred_work(struct iwl4965_priv *priv)
b481de9c 8966{
bb8c093b 8967 iwl4965_hw_cancel_deferred_work(priv);
b481de9c 8968
3ae6a054 8969 cancel_delayed_work_sync(&priv->init_alive_start);
b481de9c
ZY
8970 cancel_delayed_work(&priv->scan_check);
8971 cancel_delayed_work(&priv->alive_start);
8972 cancel_delayed_work(&priv->post_associate);
8973 cancel_work_sync(&priv->beacon_update);
8974}
8975
bb8c093b 8976static struct attribute *iwl4965_sysfs_entries[] = {
b481de9c
ZY
8977 &dev_attr_antenna.attr,
8978 &dev_attr_channels.attr,
8979 &dev_attr_dump_errors.attr,
8980 &dev_attr_dump_events.attr,
8981 &dev_attr_flags.attr,
8982 &dev_attr_filter_flags.attr,
c8b0e6e1 8983#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
b481de9c
ZY
8984 &dev_attr_measurement.attr,
8985#endif
8986 &dev_attr_power_level.attr,
8987 &dev_attr_retry_rate.attr,
8988 &dev_attr_rf_kill.attr,
8989 &dev_attr_rs_window.attr,
8990 &dev_attr_statistics.attr,
8991 &dev_attr_status.attr,
8992 &dev_attr_temperature.attr,
8993 &dev_attr_tune.attr,
8994 &dev_attr_tx_power.attr,
8995
8996 NULL
8997};
8998
bb8c093b 8999static struct attribute_group iwl4965_attribute_group = {
b481de9c 9000 .name = NULL, /* put in device directory */
bb8c093b 9001 .attrs = iwl4965_sysfs_entries,
b481de9c
ZY
9002};
9003
bb8c093b
CH
9004static struct ieee80211_ops iwl4965_hw_ops = {
9005 .tx = iwl4965_mac_tx,
9006 .start = iwl4965_mac_start,
9007 .stop = iwl4965_mac_stop,
9008 .add_interface = iwl4965_mac_add_interface,
9009 .remove_interface = iwl4965_mac_remove_interface,
9010 .config = iwl4965_mac_config,
9011 .config_interface = iwl4965_mac_config_interface,
9012 .configure_filter = iwl4965_configure_filter,
9013 .set_key = iwl4965_mac_set_key,
9014 .get_stats = iwl4965_mac_get_stats,
9015 .get_tx_stats = iwl4965_mac_get_tx_stats,
9016 .conf_tx = iwl4965_mac_conf_tx,
9017 .get_tsf = iwl4965_mac_get_tsf,
9018 .reset_tsf = iwl4965_mac_reset_tsf,
9019 .beacon_update = iwl4965_mac_beacon_update,
471b3efd 9020 .bss_info_changed = iwl4965_bss_info_changed,
c8b0e6e1 9021#ifdef CONFIG_IWL4965_HT
bb8c093b 9022 .conf_ht = iwl4965_mac_conf_ht,
9ab46173 9023 .ampdu_action = iwl4965_mac_ampdu_action,
c8b0e6e1 9024#ifdef CONFIG_IWL4965_HT_AGG
bb8c093b
CH
9025 .ht_tx_agg_start = iwl4965_mac_ht_tx_agg_start,
9026 .ht_tx_agg_stop = iwl4965_mac_ht_tx_agg_stop,
c8b0e6e1
CH
9027#endif /* CONFIG_IWL4965_HT_AGG */
9028#endif /* CONFIG_IWL4965_HT */
bb8c093b 9029 .hw_scan = iwl4965_mac_hw_scan
b481de9c
ZY
9030};
9031
bb8c093b 9032static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
b481de9c
ZY
9033{
9034 int err = 0;
bb8c093b 9035 struct iwl4965_priv *priv;
b481de9c
ZY
9036 struct ieee80211_hw *hw;
9037 int i;
5a66926a 9038 DECLARE_MAC_BUF(mac);
b481de9c 9039
6440adb5
BC
9040 /* Disabling hardware scan means that mac80211 will perform scans
9041 * "the hard way", rather than using device's scan. */
bb8c093b 9042 if (iwl4965_param_disable_hw_scan) {
b481de9c 9043 IWL_DEBUG_INFO("Disabling hw_scan\n");
bb8c093b 9044 iwl4965_hw_ops.hw_scan = NULL;
b481de9c
ZY
9045 }
9046
bb8c093b
CH
9047 if ((iwl4965_param_queues_num > IWL_MAX_NUM_QUEUES) ||
9048 (iwl4965_param_queues_num < IWL_MIN_NUM_QUEUES)) {
b481de9c
ZY
9049 IWL_ERROR("invalid queues_num, should be between %d and %d\n",
9050 IWL_MIN_NUM_QUEUES, IWL_MAX_NUM_QUEUES);
9051 err = -EINVAL;
9052 goto out;
9053 }
9054
9055 /* mac80211 allocates memory for this device instance, including
9056 * space for this driver's private structure */
bb8c093b 9057 hw = ieee80211_alloc_hw(sizeof(struct iwl4965_priv), &iwl4965_hw_ops);
b481de9c
ZY
9058 if (hw == NULL) {
9059 IWL_ERROR("Can not allocate network device\n");
9060 err = -ENOMEM;
9061 goto out;
9062 }
9063 SET_IEEE80211_DEV(hw, &pdev->dev);
9064
f51359a8
JB
9065 hw->rate_control_algorithm = "iwl-4965-rs";
9066
b481de9c
ZY
9067 IWL_DEBUG_INFO("*** LOAD DRIVER ***\n");
9068 priv = hw->priv;
9069 priv->hw = hw;
9070
9071 priv->pci_dev = pdev;
bb8c093b 9072 priv->antenna = (enum iwl4965_antenna)iwl4965_param_antenna;
c8b0e6e1 9073#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 9074 iwl4965_debug_level = iwl4965_param_debug;
b481de9c
ZY
9075 atomic_set(&priv->restrict_refcnt, 0);
9076#endif
9077 priv->retry_rate = 1;
9078
9079 priv->ibss_beacon = NULL;
9080
9081 /* Tell mac80211 and its clients (e.g. Wireless Extensions)
9082 * the range of signal quality values that we'll provide.
9083 * Negative values for level/noise indicate that we'll provide dBm.
9084 * For WE, at least, non-0 values here *enable* display of values
9085 * in app (iwconfig). */
9086 hw->max_rssi = -20; /* signal level, negative indicates dBm */
9087 hw->max_noise = -20; /* noise level, negative indicates dBm */
9088 hw->max_signal = 100; /* link quality indication (%) */
9089
9090 /* Tell mac80211 our Tx characteristics */
9091 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE;
9092
6440adb5 9093 /* Default value; 4 EDCA QOS priorities */
b481de9c 9094 hw->queues = 4;
c8b0e6e1
CH
9095#ifdef CONFIG_IWL4965_HT
9096#ifdef CONFIG_IWL4965_HT_AGG
6440adb5 9097 /* Enhanced value; more queues, to support 11n aggregation */
b481de9c 9098 hw->queues = 16;
c8b0e6e1
CH
9099#endif /* CONFIG_IWL4965_HT_AGG */
9100#endif /* CONFIG_IWL4965_HT */
b481de9c
ZY
9101
9102 spin_lock_init(&priv->lock);
9103 spin_lock_init(&priv->power_data.lock);
9104 spin_lock_init(&priv->sta_lock);
9105 spin_lock_init(&priv->hcmd_lock);
9106 spin_lock_init(&priv->lq_mngr.lock);
9107
9108 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++)
9109 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
9110
9111 INIT_LIST_HEAD(&priv->free_frames);
9112
9113 mutex_init(&priv->mutex);
9114 if (pci_enable_device(pdev)) {
9115 err = -ENODEV;
9116 goto out_ieee80211_free_hw;
9117 }
9118
9119 pci_set_master(pdev);
9120
6440adb5 9121 /* Clear the driver's (not device's) station table */
bb8c093b 9122 iwl4965_clear_stations_table(priv);
b481de9c
ZY
9123
9124 priv->data_retry_limit = -1;
9125 priv->ieee_channels = NULL;
9126 priv->ieee_rates = NULL;
9127 priv->phymode = -1;
9128
9129 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
9130 if (!err)
9131 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
9132 if (err) {
9133 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
9134 goto out_pci_disable_device;
9135 }
9136
9137 pci_set_drvdata(pdev, priv);
9138 err = pci_request_regions(pdev, DRV_NAME);
9139 if (err)
9140 goto out_pci_disable_device;
6440adb5 9141
b481de9c
ZY
9142 /* We disable the RETRY_TIMEOUT register (0x41) to keep
9143 * PCI Tx retries from interfering with C3 CPU state */
9144 pci_write_config_byte(pdev, 0x41, 0x00);
6440adb5 9145
b481de9c
ZY
9146 priv->hw_base = pci_iomap(pdev, 0, 0);
9147 if (!priv->hw_base) {
9148 err = -ENODEV;
9149 goto out_pci_release_regions;
9150 }
9151
9152 IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n",
9153 (unsigned long long) pci_resource_len(pdev, 0));
9154 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base);
9155
9156 /* Initialize module parameter values here */
9157
6440adb5 9158 /* Disable radio (SW RF KILL) via parameter when loading driver */
bb8c093b 9159 if (iwl4965_param_disable) {
b481de9c
ZY
9160 set_bit(STATUS_RF_KILL_SW, &priv->status);
9161 IWL_DEBUG_INFO("Radio disabled.\n");
9162 }
9163
9164 priv->iw_mode = IEEE80211_IF_TYPE_STA;
9165
9166 priv->ps_mode = 0;
9167 priv->use_ant_b_for_management_frame = 1; /* start with ant B */
b481de9c
ZY
9168 priv->valid_antenna = 0x7; /* assume all 3 connected */
9169 priv->ps_mode = IWL_MIMO_PS_NONE;
b481de9c 9170
6440adb5 9171 /* Choose which receivers/antennas to use */
b481de9c
ZY
9172 iwl4965_set_rxon_chain(priv);
9173
9174 printk(KERN_INFO DRV_NAME
9175 ": Detected Intel Wireless WiFi Link 4965AGN\n");
9176
9177 /* Device-specific setup */
bb8c093b 9178 if (iwl4965_hw_set_hw_setting(priv)) {
b481de9c 9179 IWL_ERROR("failed to set hw settings\n");
b481de9c
ZY
9180 goto out_iounmap;
9181 }
9182
c8b0e6e1 9183#ifdef CONFIG_IWL4965_QOS
bb8c093b 9184 if (iwl4965_param_qos_enable)
b481de9c
ZY
9185 priv->qos_data.qos_enable = 1;
9186
bb8c093b 9187 iwl4965_reset_qos(priv);
b481de9c
ZY
9188
9189 priv->qos_data.qos_active = 0;
9190 priv->qos_data.qos_cap.val = 0;
c8b0e6e1 9191#endif /* CONFIG_IWL4965_QOS */
b481de9c 9192
bb8c093b
CH
9193 iwl4965_set_rxon_channel(priv, MODE_IEEE80211G, 6);
9194 iwl4965_setup_deferred_work(priv);
9195 iwl4965_setup_rx_handlers(priv);
b481de9c
ZY
9196
9197 priv->rates_mask = IWL_RATES_MASK;
9198 /* If power management is turned on, default to AC mode */
9199 priv->power_mode = IWL_POWER_AC;
9200 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;
9201
bb8c093b 9202 iwl4965_disable_interrupts(priv);
49df2b33 9203
bb8c093b 9204 err = sysfs_create_group(&pdev->dev.kobj, &iwl4965_attribute_group);
b481de9c
ZY
9205 if (err) {
9206 IWL_ERROR("failed to create sysfs device attributes\n");
b481de9c
ZY
9207 goto out_release_irq;
9208 }
9209
5a66926a
ZY
9210 /* nic init */
9211 iwl4965_set_bit(priv, CSR_GIO_CHICKEN_BITS,
9212 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
9213
9214 iwl4965_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
9215 err = iwl4965_poll_bit(priv, CSR_GP_CNTRL,
9216 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
9217 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
9218 if (err < 0) {
9219 IWL_DEBUG_INFO("Failed to init the card\n");
9220 goto out_remove_sysfs;
9221 }
9222 /* Read the EEPROM */
9223 err = iwl4965_eeprom_init(priv);
b481de9c 9224 if (err) {
5a66926a
ZY
9225 IWL_ERROR("Unable to init EEPROM\n");
9226 goto out_remove_sysfs;
b481de9c 9227 }
5a66926a
ZY
9228 /* MAC Address location in EEPROM same for 3945/4965 */
9229 get_eeprom_mac(priv, priv->mac_addr);
9230 IWL_DEBUG_INFO("MAC address: %s\n", print_mac(mac, priv->mac_addr));
9231 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
b481de9c 9232
849e0dce
RC
9233 err = iwl4965_init_channel_map(priv);
9234 if (err) {
9235 IWL_ERROR("initializing regulatory failed: %d\n", err);
9236 goto out_remove_sysfs;
9237 }
9238
9239 err = iwl4965_init_geos(priv);
9240 if (err) {
9241 IWL_ERROR("initializing geos failed: %d\n", err);
9242 goto out_free_channel_map;
9243 }
9244 iwl4965_reset_channel_flag(priv);
9245
5a66926a
ZY
9246 iwl4965_rate_control_register(priv->hw);
9247 err = ieee80211_register_hw(priv->hw);
9248 if (err) {
9249 IWL_ERROR("Failed to register network device (error %d)\n", err);
849e0dce 9250 goto out_free_geos;
5a66926a 9251 }
b481de9c 9252
5a66926a
ZY
9253 priv->hw->conf.beacon_int = 100;
9254 priv->mac80211_registered = 1;
9255 pci_save_state(pdev);
9256 pci_disable_device(pdev);
b481de9c
ZY
9257
9258 return 0;
9259
849e0dce
RC
9260 out_free_geos:
9261 iwl4965_free_geos(priv);
9262 out_free_channel_map:
9263 iwl4965_free_channel_map(priv);
5a66926a 9264 out_remove_sysfs:
bb8c093b 9265 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
b481de9c
ZY
9266
9267 out_release_irq:
b481de9c
ZY
9268 destroy_workqueue(priv->workqueue);
9269 priv->workqueue = NULL;
bb8c093b 9270 iwl4965_unset_hw_setting(priv);
b481de9c
ZY
9271
9272 out_iounmap:
9273 pci_iounmap(pdev, priv->hw_base);
9274 out_pci_release_regions:
9275 pci_release_regions(pdev);
9276 out_pci_disable_device:
9277 pci_disable_device(pdev);
9278 pci_set_drvdata(pdev, NULL);
9279 out_ieee80211_free_hw:
9280 ieee80211_free_hw(priv->hw);
9281 out:
9282 return err;
9283}
9284
bb8c093b 9285static void iwl4965_pci_remove(struct pci_dev *pdev)
b481de9c 9286{
bb8c093b 9287 struct iwl4965_priv *priv = pci_get_drvdata(pdev);
b481de9c
ZY
9288 struct list_head *p, *q;
9289 int i;
9290
9291 if (!priv)
9292 return;
9293
9294 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n");
9295
b481de9c 9296 set_bit(STATUS_EXIT_PENDING, &priv->status);
b24d22b1 9297
bb8c093b 9298 iwl4965_down(priv);
b481de9c
ZY
9299
9300 /* Free MAC hash list for ADHOC */
9301 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) {
9302 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
9303 list_del(p);
bb8c093b 9304 kfree(list_entry(p, struct iwl4965_ibss_seq, list));
b481de9c
ZY
9305 }
9306 }
9307
bb8c093b 9308 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
b481de9c 9309
bb8c093b 9310 iwl4965_dealloc_ucode_pci(priv);
b481de9c
ZY
9311
9312 if (priv->rxq.bd)
bb8c093b
CH
9313 iwl4965_rx_queue_free(priv, &priv->rxq);
9314 iwl4965_hw_txq_ctx_free(priv);
b481de9c 9315
bb8c093b
CH
9316 iwl4965_unset_hw_setting(priv);
9317 iwl4965_clear_stations_table(priv);
b481de9c
ZY
9318
9319 if (priv->mac80211_registered) {
9320 ieee80211_unregister_hw(priv->hw);
bb8c093b 9321 iwl4965_rate_control_unregister(priv->hw);
b481de9c
ZY
9322 }
9323
948c171c
MA
9324 /*netif_stop_queue(dev); */
9325 flush_workqueue(priv->workqueue);
9326
bb8c093b 9327 /* ieee80211_unregister_hw calls iwl4965_mac_stop, which flushes
b481de9c
ZY
9328 * priv->workqueue... so we can't take down the workqueue
9329 * until now... */
9330 destroy_workqueue(priv->workqueue);
9331 priv->workqueue = NULL;
9332
b481de9c
ZY
9333 pci_iounmap(pdev, priv->hw_base);
9334 pci_release_regions(pdev);
9335 pci_disable_device(pdev);
9336 pci_set_drvdata(pdev, NULL);
9337
849e0dce
RC
9338 iwl4965_free_channel_map(priv);
9339 iwl4965_free_geos(priv);
b481de9c
ZY
9340
9341 if (priv->ibss_beacon)
9342 dev_kfree_skb(priv->ibss_beacon);
9343
9344 ieee80211_free_hw(priv->hw);
9345}
9346
9347#ifdef CONFIG_PM
9348
bb8c093b 9349static int iwl4965_pci_suspend(struct pci_dev *pdev, pm_message_t state)
b481de9c 9350{
bb8c093b 9351 struct iwl4965_priv *priv = pci_get_drvdata(pdev);
b481de9c 9352
e655b9f0
ZY
9353 if (priv->is_open) {
9354 set_bit(STATUS_IN_SUSPEND, &priv->status);
9355 iwl4965_mac_stop(priv->hw);
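 /* keep is_open set so iwl4965_pci_resume() knows the interface
 * was up and should be restarted via iwl4965_mac_start() */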
9356 priv->is_open = 1;
9357 }
b481de9c 9358
b481de9c
ZY
9359 pci_set_power_state(pdev, PCI_D3hot);
9360
b481de9c
ZY
9361 return 0;
9362}
9363
bb8c093b 9364static int iwl4965_pci_resume(struct pci_dev *pdev)
b481de9c 9365{
bb8c093b 9366 struct iwl4965_priv *priv = pci_get_drvdata(pdev);
b481de9c 9367
b481de9c 9368 pci_set_power_state(pdev, PCI_D0);
b481de9c 9369
e655b9f0
ZY
9370 if (priv->is_open)
9371 iwl4965_mac_start(priv->hw);
b481de9c 9372
e655b9f0 9373 clear_bit(STATUS_IN_SUSPEND, &priv->status);
b481de9c
ZY
9374 return 0;
9375}
9376
9377#endif /* CONFIG_PM */
9378
9379/*****************************************************************************
9380 *
9381 * driver and module entry point
9382 *
9383 *****************************************************************************/
9384
bb8c093b 9385static struct pci_driver iwl4965_driver = {
b481de9c 9386 .name = DRV_NAME,
bb8c093b
CH
9387 .id_table = iwl4965_hw_card_ids,
9388 .probe = iwl4965_pci_probe,
9389 .remove = __devexit_p(iwl4965_pci_remove),
b481de9c 9390#ifdef CONFIG_PM
bb8c093b
CH
9391 .suspend = iwl4965_pci_suspend,
9392 .resume = iwl4965_pci_resume,
b481de9c
ZY
9393#endif
9394};
9395
bb8c093b 9396static int __init iwl4965_init(void)
b481de9c
ZY
9397{
9398
9399 int ret;
9400 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
9401 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
bb8c093b 9402 ret = pci_register_driver(&iwl4965_driver);
b481de9c
ZY
9403 if (ret) {
9404 IWL_ERROR("Unable to initialize PCI module\n");
9405 return ret;
9406 }
c8b0e6e1 9407#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 9408 ret = driver_create_file(&iwl4965_driver.driver, &driver_attr_debug_level);
b481de9c
ZY
9409 if (ret) {
9410 IWL_ERROR("Unable to create driver sysfs file\n");
bb8c093b 9411 pci_unregister_driver(&iwl4965_driver);
b481de9c
ZY
9412 return ret;
9413 }
9414#endif
9415
9416 return ret;
9417}
9418
bb8c093b 9419static void __exit iwl4965_exit(void)
b481de9c 9420{
c8b0e6e1 9421#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 9422 driver_remove_file(&iwl4965_driver.driver, &driver_attr_debug_level);
b481de9c 9423#endif
bb8c093b 9424 pci_unregister_driver(&iwl4965_driver);
b481de9c
ZY
9425}
9426
bb8c093b 9427module_param_named(antenna, iwl4965_param_antenna, int, 0444);
b481de9c 9428MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
bb8c093b 9429module_param_named(disable, iwl4965_param_disable, int, 0444);
b481de9c 9430MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
bb8c093b 9431module_param_named(hwcrypto, iwl4965_param_hwcrypto, int, 0444);
b481de9c
ZY
9432MODULE_PARM_DESC(hwcrypto,
9433 "using hardware crypto engine (default 0 [software])\n");
bb8c093b 9434module_param_named(debug, iwl4965_param_debug, int, 0444);
b481de9c 9435MODULE_PARM_DESC(debug, "debug output mask");
bb8c093b 9436module_param_named(disable_hw_scan, iwl4965_param_disable_hw_scan, int, 0444);
b481de9c
ZY
9437MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
9438
bb8c093b 9439module_param_named(queues_num, iwl4965_param_queues_num, int, 0444);
b481de9c
ZY
9440MODULE_PARM_DESC(queues_num, "number of hw queues.");
9441
9442/* QoS */
bb8c093b 9443module_param_named(qos_enable, iwl4965_param_qos_enable, int, 0444);
b481de9c 9444MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
9ee1ba47
RR
9445module_param_named(amsdu_size_8K, iwl4965_param_amsdu_size_8K, int, 0444);
9446MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
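/*
 * Illustrative usage, not part of the driver (module name assumed to be
 * iwl4965): the parameters above can be given at load time, e.g.
 *
 *   modprobe iwl4965 debug=0x47 disable_hw_scan=1 queues_num=16
 *
 * Note that debug= only has an effect when CONFIG_IWL4965_DEBUG is set,
 * and queues_num must stay between IWL_MIN_NUM_QUEUES and
 * IWL_MAX_NUM_QUEUES (16).
 */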
b481de9c 9447
bb8c093b
CH
9448module_exit(iwl4965_exit);
9449module_init(iwl4965_init);