/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	desc->status = IRQ_DEFAULT_INIT_FLAGS;
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, NR_IRQS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
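
/*
 * A minimal usage sketch: with sparse irqs the radix tree lookup can fail,
 * so callers must be prepared for a NULL return:
 *
 *	struct irq_desc *desc = irq_to_desc(irq);
 *
 *	if (!desc)
 *		return -EINVAL;
 */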

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

static struct irq_desc *alloc_desc(int irq, int node)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = kzalloc_node(nr_cpu_ids * sizeof(*desc->kstat_irqs),
					gfp, node);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node);

	return desc;

err_kstat:
	kfree(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	kfree(desc->kstat_irqs);
	kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	int res = irq_alloc_descs(irq, irq, 1, node);

	if (res == -EEXIST || res == irq)
		return irq_to_desc(irq);
	return NULL;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status = IRQ_DEFAULT_INIT_FLAGS,
		.handle_irq = handle_bad_irq,
		.depth = 1,
		.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq_data.irq = i;
		desc[i].irq_data.chip = &no_irq_chip;
		desc[i].kstat_irqs = kstat_irqs_all[i];
		alloc_masks(desc + i, GFP_KERNEL, node);
		desc_smp_init(desc + i, node);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
	return irq_to_desc(irq);
}

static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	return start;
}
#endif /* !CONFIG_SPARSE_IRQ */

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from: Start of descriptor range
 * @cnt: Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq: Allocate for specific irq number if irq >= 0
 * @from: Start the search from this irq number
 * @cnt: Number of consecutive irqs to allocate.
 * @node: Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	ret = -ENOMEM;
	if (start >= nr_irqs)
		goto err;

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
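
/*
 * A minimal usage sketch: irq_alloc_descs() and irq_free_descs() are meant
 * to be used as a pair. A caller needing a block of four interrupts on a
 * given node might do:
 *
 *	int base = irq_alloc_descs(-1, 0, 4, node);
 *
 *	if (base < 0)
 *		return base;
 *	...
 *	irq_free_descs(base, 4);
 *
 * Passing irq >= 0 requests that exact number and fails with -EEXIST when
 * it is already in use.
 */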

/**
 * irq_reserve_irqs - mark irqs allocated
 * @from: mark from irq number
 * @cnt: number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
	unsigned int start;
	int ret = 0;

	if (!cnt || (from + cnt) > nr_irqs)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);
	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	if (start == from)
		bitmap_set(allocated_irqs, start, cnt);
	else
		ret = -EEXIST;
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
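
/*
 * A minimal usage sketch: irq_reserve_irqs() only marks a range in the
 * allocation bitmap, it creates no descriptors. Architecture code can use
 * it to keep a fixed range, e.g. the 16 legacy interrupts, from being
 * handed out dynamically:
 *
 *	if (irq_reserve_irqs(0, 16))
 *		pr_warning("legacy irqs already in use\n");
 */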

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset: where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}
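
/*
 * A minimal usage sketch: together with nr_irqs this allows walking all
 * allocated interrupt numbers without touching the bitmap directly:
 *
 *	unsigned int irq;
 *
 *	for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *	     irq = irq_get_next_irq(irq + 1)) {
 *		...
 *	}
 */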

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq: irq number to clean up
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc));
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
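
/*
 * Note: this resets the descriptor to the state established by
 * desc_set_defaults(); the !CONFIG_SPARSE_IRQ free_desc() above relies on
 * it when an irq number is released.
 */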

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc ? desc->kstat_irqs[cpu] : 0;
}
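
/*
 * A minimal usage sketch: kstat_irqs_cpu() reads one CPU's count for an
 * interrupt, while kstat_irqs() below sums the count over all possible
 * CPUs:
 *
 *	unsigned int on_cpu0 = kstat_irqs_cpu(irq, 0);
 *	unsigned int total = kstat_irqs(irq);
 */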

#ifdef CONFIG_GENERIC_HARDIRQS
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc)
		return 0;
	for_each_possible_cpu(cpu)
		sum += desc->kstat_irqs[cpu];
	return sum;
}
#endif /* CONFIG_GENERIC_HARDIRQS */