/*
 * Input layer to RF Kill interface connector
 *
 * Copyright (c) 2007 Dmitry Torokhov
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/init.h>
#include <linux/rfkill.h>
#include <linux/sched.h>

#include "rfkill-input.h"

MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>");
MODULE_DESCRIPTION("Input layer to RF switch connector");
MODULE_LICENSE("GPL");

enum rfkill_input_master_mode {
        RFKILL_INPUT_MASTER_DONOTHING = 0,
        RFKILL_INPUT_MASTER_RESTORE = 1,
        RFKILL_INPUT_MASTER_UNBLOCKALL = 2,
        RFKILL_INPUT_MASTER_MAX,        /* marker */
};

/* Delay (in ms) between consecutive switch ops */
#define RFKILL_OPS_DELAY 200

static enum rfkill_input_master_mode rfkill_master_switch_mode =
                                        RFKILL_INPUT_MASTER_UNBLOCKALL;
module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0);
MODULE_PARM_DESC(master_switch_mode,
        "SW_RFKILL_ALL ON should: 0=do nothing; 1=restore; 2=unblock all");

enum rfkill_global_sched_op {
        RFKILL_GLOBAL_OP_EPO = 0,
        RFKILL_GLOBAL_OP_RESTORE,
        RFKILL_GLOBAL_OP_UNLOCK,
        RFKILL_GLOBAL_OP_UNBLOCK,
};

struct rfkill_task {
        struct delayed_work dwork;

        /* ensures that task is serialized */
        struct mutex mutex;

        /* protects everything below */
        spinlock_t lock;

        /* pending regular switch operations (1=pending) */
        unsigned long sw_pending[BITS_TO_LONGS(RFKILL_TYPE_MAX)];

        /* should the state be complemented (1=yes) */
        unsigned long sw_togglestate[BITS_TO_LONGS(RFKILL_TYPE_MAX)];

        bool global_op_pending;
        enum rfkill_global_sched_op op;

        /* last time it was scheduled */
        unsigned long last_scheduled;
};

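/*
 * Execute a scheduled global operation.  Called from the work handler
 * with rfkill_task.lock dropped; unknown opcodes fail safely by forcing
 * an emergency power off.
 */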
static void __rfkill_handle_global_op(enum rfkill_global_sched_op op)
{
        unsigned int i;

        switch (op) {
        case RFKILL_GLOBAL_OP_EPO:
                rfkill_epo();
                break;
        case RFKILL_GLOBAL_OP_RESTORE:
                rfkill_restore_states();
                break;
        case RFKILL_GLOBAL_OP_UNLOCK:
                rfkill_remove_epo_lock();
                break;
        case RFKILL_GLOBAL_OP_UNBLOCK:
                rfkill_remove_epo_lock();
                for (i = 0; i < RFKILL_TYPE_MAX; i++)
                        rfkill_switch_all(i, RFKILL_STATE_UNBLOCKED);
                break;
        default:
                /* memory corruption or bug, fail safely */
                rfkill_epo();
                WARN(1, "Unknown requested operation %d! "
                        "rfkill Emergency Power Off activated\n",
                        op);
        }
}

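/*
 * Apply a pending per-type operation: read the current global state
 * for @type and, if the toggle bit was set (@c), switch all
 * transmitters of that type to the complemented state.
 */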
static void __rfkill_handle_normal_op(const enum rfkill_type type,
                        const bool c)
{
        enum rfkill_state state;

        state = rfkill_get_global_state(type);
        if (c)
                state = rfkill_state_complement(state);

        rfkill_switch_all(type, state);
}

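/*
 * Work handler: drain the pending operations.  A pending global op is
 * served first and clears all per-type requests; per-type toggles are
 * only processed while no EPO lock is active.  The outer loop repeats
 * until a full pass completes with no new global op queued.
 */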
static void rfkill_task_handler(struct work_struct *work)
{
        struct rfkill_task *task = container_of(work,
                                        struct rfkill_task, dwork.work);
        bool doit = true;

        mutex_lock(&task->mutex);

        spin_lock_irq(&task->lock);
        while (doit) {
                if (task->global_op_pending) {
                        enum rfkill_global_sched_op op = task->op;
                        task->global_op_pending = false;
                        memset(task->sw_pending, 0, sizeof(task->sw_pending));
                        spin_unlock_irq(&task->lock);

                        __rfkill_handle_global_op(op);

                        /* make sure we do at least one pass with
                         * !task->global_op_pending */
                        spin_lock_irq(&task->lock);
                        continue;
                } else if (!rfkill_is_epo_lock_active()) {
                        unsigned int i = 0;

                        while (!task->global_op_pending &&
                                                i < RFKILL_TYPE_MAX) {
                                if (test_and_clear_bit(i, task->sw_pending)) {
                                        bool c;
                                        c = test_and_clear_bit(i,
                                                        task->sw_togglestate);
                                        spin_unlock_irq(&task->lock);

                                        __rfkill_handle_normal_op(i, c);

                                        spin_lock_irq(&task->lock);
                                }
                                i++;
                        }
                }
                doit = task->global_op_pending;
        }
        spin_unlock_irq(&task->lock);

        mutex_unlock(&task->mutex);
}

static struct rfkill_task rfkill_task = {
        .dwork = __DELAYED_WORK_INITIALIZER(rfkill_task.dwork,
                                rfkill_task_handler),
        .mutex = __MUTEX_INITIALIZER(rfkill_task.mutex),
        .lock = __SPIN_LOCK_UNLOCKED(rfkill_task.lock),
};

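/*
 * Return the delay to apply before the next switch operation: zero if
 * at least RFKILL_OPS_DELAY ms have passed since @last, otherwise the
 * full RFKILL_OPS_DELAY converted to jiffies.
 */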
static unsigned long rfkill_ratelimit(const unsigned long last)
{
        const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);
        return (time_after(jiffies, last + delay)) ? 0 : delay;
}

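/*
 * Schedule the work task unless it is already pending, applying the
 * rate limit above.  Called with rfkill_task.lock held.
 */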
static void rfkill_schedule_ratelimited(void)
{
        if (!delayed_work_pending(&rfkill_task.dwork)) {
                schedule_delayed_work(&rfkill_task.dwork,
                                rfkill_ratelimit(rfkill_task.last_scheduled));
                rfkill_task.last_scheduled = jiffies;
        }
}

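/*
 * Queue a global operation.  EPO requests bypass the rate limiter
 * unless the EPO lock is already active; everything else goes through
 * the normal rate-limited path.
 */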
static void rfkill_schedule_global_op(enum rfkill_global_sched_op op)
{
        unsigned long flags;

        spin_lock_irqsave(&rfkill_task.lock, flags);
        rfkill_task.op = op;
        rfkill_task.global_op_pending = true;
        if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
                /* bypass the limiter for EPO */
                cancel_delayed_work(&rfkill_task.dwork);
                schedule_delayed_work(&rfkill_task.dwork, 0);
                rfkill_task.last_scheduled = jiffies;
        } else
                rfkill_schedule_ratelimited();
        spin_unlock_irqrestore(&rfkill_task.lock, flags);
}

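/*
 * Queue a state toggle for a single transmitter type.  Ignored while
 * the EPO lock is active or when a global operation is already
 * pending, as that operation overrides per-type requests.
 */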
static void rfkill_schedule_toggle(enum rfkill_type type)
{
        unsigned long flags;

        if (rfkill_is_epo_lock_active())
                return;

        spin_lock_irqsave(&rfkill_task.lock, flags);
        if (!rfkill_task.global_op_pending) {
                set_bit(type, rfkill_task.sw_pending);
                change_bit(type, rfkill_task.sw_togglestate);
                rfkill_schedule_ratelimited();
        }
        spin_unlock_irqrestore(&rfkill_task.lock, flags);
}

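/*
 * Handle a SW_RFKILL_ALL state change.  The OFF position always maps
 * to an emergency power off; the ON position is interpreted according
 * to the master_switch_mode module parameter.
 */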
static void rfkill_schedule_evsw_rfkillall(int state)
{
        if (state) {
                switch (rfkill_master_switch_mode) {
                case RFKILL_INPUT_MASTER_UNBLOCKALL:
                        rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNBLOCK);
                        break;
                case RFKILL_INPUT_MASTER_RESTORE:
                        rfkill_schedule_global_op(RFKILL_GLOBAL_OP_RESTORE);
                        break;
                case RFKILL_INPUT_MASTER_DONOTHING:
                        rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNLOCK);
                        break;
                default:
                        /* memory corruption or driver bug! fail safely */
                        rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
                        WARN(1, "Unknown rfkill_master_switch_mode (%d), "
                                "driver bug or memory corruption detected!\n",
                                rfkill_master_switch_mode);
                        break;
                }
        } else
                rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
}

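/*
 * Input event callback.  Key presses for the known rfkill hotkeys
 * schedule a toggle of the matching transmitter type; EV_SW events for
 * SW_RFKILL_ALL are forwarded to the master-switch handler above.
 */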
static void rfkill_event(struct input_handle *handle, unsigned int type,
                        unsigned int code, int data)
{
        if (type == EV_KEY && data == 1) {
                enum rfkill_type t;

                switch (code) {
                case KEY_WLAN:
                        t = RFKILL_TYPE_WLAN;
                        break;
                case KEY_BLUETOOTH:
                        t = RFKILL_TYPE_BLUETOOTH;
                        break;
                case KEY_UWB:
                        t = RFKILL_TYPE_UWB;
                        break;
                case KEY_WIMAX:
                        t = RFKILL_TYPE_WIMAX;
                        break;
                default:
                        return;
                }
                rfkill_schedule_toggle(t);
                return;
        } else if (type == EV_SW) {
                switch (code) {
                case SW_RFKILL_ALL:
                        rfkill_schedule_evsw_rfkillall(data);
                        return;
                default:
                        return;
                }
        }
}

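/*
 * Attach to an input device that matched rfkill_ids: allocate a
 * handle, register it (which triggers rfkill_start()) and open the
 * device so events start flowing.
 */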
static int rfkill_connect(struct input_handler *handler, struct input_dev *dev,
                          const struct input_device_id *id)
{
        struct input_handle *handle;
        int error;

        handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
        if (!handle)
                return -ENOMEM;

        handle->dev = dev;
        handle->handler = handler;
        handle->name = "rfkill";

        /* causes rfkill_start() to be called */
        error = input_register_handle(handle);
        if (error)
                goto err_free_handle;

        error = input_open_device(handle);
        if (error)
                goto err_unregister_handle;

        return 0;

 err_unregister_handle:
        input_unregister_handle(handle);
 err_free_handle:
        kfree(handle);
        return error;
}

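/*
 * Called when the handle is registered: synchronize rfkill with the
 * current position of SW_RFKILL_ALL, if the device provides it.
 */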
static void rfkill_start(struct input_handle *handle)
{
        /* Take event_lock to guard against configuration changes, we
         * should be able to deal with concurrency with rfkill_event()
         * just fine (which event_lock will also avoid). */
        spin_lock_irq(&handle->dev->event_lock);

        if (test_bit(EV_SW, handle->dev->evbit)) {
                if (test_bit(SW_RFKILL_ALL, handle->dev->swbit))
                        rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL,
                                                        handle->dev->sw));
                /* add resync for further EV_SW events here */
        }

        spin_unlock_irq(&handle->dev->event_lock);
}

static void rfkill_disconnect(struct input_handle *handle)
{
        input_close_device(handle);
        input_unregister_handle(handle);
        kfree(handle);
}

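/*
 * Match any input device that advertises one of the rfkill hotkeys
 * (WLAN, Bluetooth, UWB, WiMAX) or the SW_RFKILL_ALL master switch.
 */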
static const struct input_device_id rfkill_ids[] = {
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
                .evbit = { BIT_MASK(EV_KEY) },
                .keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) },
        },
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
                .evbit = { BIT_MASK(EV_KEY) },
                .keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) },
        },
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
                .evbit = { BIT_MASK(EV_KEY) },
                .keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) },
        },
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
                .evbit = { BIT_MASK(EV_KEY) },
                .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
        },
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
                .evbit = { BIT(EV_SW) },
                .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
        },
        { }
};

static struct input_handler rfkill_handler = {
        .event =        rfkill_event,
        .connect =      rfkill_connect,
        .disconnect =   rfkill_disconnect,
        .start =        rfkill_start,
        .name =         "rfkill",
        .id_table =     rfkill_ids,
};

static int __init rfkill_handler_init(void)
{
        if (rfkill_master_switch_mode >= RFKILL_INPUT_MASTER_MAX)
                return -EINVAL;

        /*
         * The penalty to not doing this is a possible RFKILL_OPS_DELAY delay
         * at the first use.  Acceptable, but if we can avoid it, why not?
         */
        rfkill_task.last_scheduled =
                        jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1;
        return input_register_handler(&rfkill_handler);
}

static void __exit rfkill_handler_exit(void)
{
        input_unregister_handler(&rfkill_handler);
        cancel_delayed_work_sync(&rfkill_task.dwork);
        rfkill_remove_epo_lock();
}

module_init(rfkill_handler_init);
module_exit(rfkill_handler_exit);