/*
 * kernel/irq/migration.c — genirq helpers that apply a pending IRQ
 * affinity change (desc->pending_mask) to the interrupt chip.
 */

#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"

7void move_masked_irq(int irq)
8{
9 struct irq_desc *desc = irq_to_desc(irq);
10 struct irq_chip *chip = desc->irq_data.chip;
11
12 if (likely(!(desc->status & IRQ_MOVE_PENDING)))
13 return;
14
15 /*
16 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
17 */
18 if (CHECK_IRQ_PER_CPU(desc->status)) {
19 WARN_ON(1);
20 return;
21 }
22
23 desc->status &= ~IRQ_MOVE_PENDING;
24
25 if (unlikely(cpumask_empty(desc->pending_mask)))
26 return;
27
28 if (!chip->irq_set_affinity)
29 return;
30
31 assert_raw_spin_locked(&desc->lock);
32
33 /*
34 * If there was a valid mask to work with, please
35 * do the disable, re-program, enable sequence.
36 * This is *not* particularly important for level triggered
37 * but in a edge trigger case, we might be setting rte
38 * when an active trigger is comming in. This could
39 * cause some ioapics to mal-function.
40 * Being paranoid i guess!
41 *
42 * For correct operation this depends on the caller
43 * masking the irqs.
44 */
45 if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
46 < nr_cpu_ids))
47 if (!chip->irq_set_affinity(&desc->irq_data,
48 desc->pending_mask, false)) {
49 cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
50 irq_set_thread_affinity(desc);
51 }
52
53 cpumask_clear(desc->pending_mask);
54}
55
56void move_native_irq(int irq)
57{
58 struct irq_desc *desc = irq_to_desc(irq);
59
60 if (likely(!(desc->status & IRQ_MOVE_PENDING)))
61 return;
62
63 if (unlikely(desc->status & IRQ_DISABLED))
64 return;
65
66 desc->irq_data.chip->irq_mask(&desc->irq_data);
67 move_masked_irq(irq);
68 desc->irq_data.chip->irq_unmask(&desc->irq_data);
69}
70