/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include "linux/cpumask.h"
#include "linux/hardirq.h"
#include "linux/interrupt.h"
#include "linux/kernel_stat.h"
#include "linux/module.h"
#include "linux/sched.h"
#include "linux/seq_file.h"
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"

/*
 * Generic, controller-independent functions:
 */

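/*
 * show_interrupts is the /proc/interrupts seq_file show routine: for
 * each IRQ it prints the per-CPU counts, the chip name, and the names
 * of all chained handlers.
 */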
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %14s", irq_desc[i].chip->typename);
		seq_printf(p, "  %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS)
		seq_putc(p, '\n');

	return 0;
}

/*
 * This list is accessed under irq_lock, except in sigio_handler,
 * where it is safe from being modified.  IRQ handlers won't change it -
 * if an IRQ source has vanished, it will be freed by free_irqs just
 * before returning from sigio_handler.  That will process a separate
 * list of irqs to free, with its own locking, coming back here to
 * remove list elements, taking the irq_lock to do so.
 */
static struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;

extern void free_irqs(void);

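/*
 * sigio_handler is the SIGIO handler proper: os_waiting_for_events
 * polls the host descriptors on active_fds and marks the entries that
 * fired, and each marked entry then gets its IRQ dispatched via
 * do_IRQ.  -EINTR from the poll is retried; anything else ends the
 * loop.
 */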
void sigio_handler(int sig, struct uml_pt_regs *regs)
{
	struct irq_fd *irq_fd;
	int n;

	if (smp_sigio_handler())
		return;

	while (1) {
		n = os_waiting_for_events(active_fds);
		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else break;
		}

		for (irq_fd = active_fds; irq_fd != NULL;
		     irq_fd = irq_fd->next) {
			if (irq_fd->current_events != 0) {
				irq_fd->current_events = 0;
				do_IRQ(irq_fd->irq, regs);
			}
		}
	}

	free_irqs();
}

static DEFINE_SPINLOCK(irq_lock);

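/*
 * activate_fd wires a host file descriptor up as an interrupt source:
 * the fd is switched to async (SIGIO) mode, a new irq_fd is appended
 * to active_fds, and a slot is made for it in the pollfds array that
 * os_waiting_for_events scans.  Registering the same (fd, type) pair
 * twice fails with -EBUSY.
 */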
static int activate_fd(int irq, int fd, int type, void *dev_id)
{
	struct pollfd *tmp_pfd;
	struct irq_fd *new_fd, *irq_fd;
	unsigned long flags;
	int events, err, n;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	err = -ENOMEM;
	new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
	if (new_fd == NULL)
		goto out;

	if (type == IRQ_READ)
		events = UM_POLLIN | UM_POLLPRI;
	else events = UM_POLLOUT;
	*new_fd = ((struct irq_fd) { .next		= NULL,
				     .id		= dev_id,
				     .fd		= fd,
				     .type		= type,
				     .irq		= irq,
				     .events		= events,
				     .current_events	= 0 } );

	err = -EBUSY;
	spin_lock_irqsave(&irq_lock, flags);
	for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
		if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
			printk(KERN_ERR "Registering fd %d twice\n", fd);
			printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
			printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
			       dev_id);
			goto out_unlock;
		}
	}

	if (type == IRQ_WRITE)
		fd = -1;

	tmp_pfd = NULL;
	n = 0;

	while (1) {
		n = os_create_pollfd(fd, events, tmp_pfd, n);
		if (n == 0)
			break;

		/*
		 * n > 0 means that we couldn't fit the new pollfd into
		 * the current pollfds array, and that tmp_pfd is either
		 * NULL or too small to hold the enlarged array.  At
		 * least n bytes are needed.
		 *
		 * We have to drop the lock here in order to call
		 * kmalloc, which might sleep.  If something else changed
		 * the pollfds array in the meantime so that our entry
		 * still doesn't fit, we free tmp_pfd and try again.
		 */
		spin_unlock_irqrestore(&irq_lock, flags);
		kfree(tmp_pfd);

		tmp_pfd = kmalloc(n, GFP_KERNEL);
		if (tmp_pfd == NULL)
			goto out_kfree;

		spin_lock_irqsave(&irq_lock, flags);
	}

	*last_irq_ptr = new_fd;
	last_irq_ptr = &new_fd->next;

	spin_unlock_irqrestore(&irq_lock, flags);

	/*
	 * This calls activate_fd, so it has to be outside the critical
	 * section.
	 */
	maybe_sigio_broken(fd, (type == IRQ_READ));

	return 0;

 out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
 out_kfree:
	kfree(new_fd);
 out:
	return err;
}

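/*
 * free_irq_by_cb removes every element of active_fds that the test
 * callback matches; the list surgery itself happens in the os layer,
 * under irq_lock.
 */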
static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
	spin_unlock_irqrestore(&irq_lock, flags);
}

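/*
 * Matcher callbacks for free_irq_by_cb - one keys on (irq, dev_id),
 * the other on the host file descriptor alone.
 */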
struct irq_and_dev {
	int irq;
	void *dev;
};

static int same_irq_and_dev(struct irq_fd *irq, void *d)
{
	struct irq_and_dev *data = d;

	return ((irq->irq == data->irq) && (irq->id == data->dev));
}

static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq,
							  .dev = dev });

	free_irq_by_cb(same_irq_and_dev, &data);
}

static int same_fd(struct irq_fd *irq, void *fd)
{
	return (irq->fd == *((int *)fd));
}

void free_irq_by_fd(int fd)
{
	free_irq_by_cb(same_fd, &fd);
}

/* Must be called with irq_lock held */
static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
	struct irq_fd *irq;
	int i = 0;
	int fdi;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		if ((irq->fd == fd) && (irq->irq == irqnum))
			break;
		i++;
	}
	if (irq == NULL) {
		printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
		       fd);
		goto out;
	}
	fdi = os_get_pollfd(i);
	if ((fdi != -1) && (fdi != fd)) {
		printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
		       "and pollfds, fd %d vs %d, need %d\n", irq->fd,
		       fdi, fd);
		irq = NULL;
		goto out;
	}
	*index_out = i;
 out:
	return irq;
}

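/*
 * reactivate_fd re-arms a descriptor once its driver has consumed the
 * pending data: the fd is written back into its pollfd slot and
 * re-added to the set the sigio thread watches.
 */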
void reactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}
	os_set_pollfd(i, irq->fd);
	spin_unlock_irqrestore(&irq_lock, flags);

	add_sigio_fd(fd);
}

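/*
 * deactivate_fd is the converse: the pollfd slot is cleared so the
 * descriptor stops generating interrupts, and the sigio thread is
 * told to ignore it.
 */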
void deactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}

	os_set_pollfd(i, -1);
	spin_unlock_irqrestore(&irq_lock, flags);

	ignore_sigio_fd(fd);
}

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
	struct irq_fd *irq;
	int err;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		err = os_clear_fd_async(irq->fd);
		if (err)
			return err;
	}
	/* If there is a signal already queued, after unblocking ignore it */
	os_set_ioignore();

	return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
	irq_enter();
	__do_IRQ(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

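/*
 * um_request_irq ties a host file descriptor to a UML IRQ: if fd is
 * not -1, it is registered as the interrupt source via activate_fd,
 * and then the handler is installed with the generic request_irq.  A
 * driver watching a readable descriptor might call it like this (the
 * MYDEV_* names are purely illustrative, not defined in this file):
 *
 *	err = um_request_irq(MYDEV_IRQ, fd, IRQ_READ, mydev_interrupt,
 *			     IRQF_SHARED, "mydev", dev);
 *	if (err < 0)
 *		return err;
 */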
int um_request_irq(unsigned int irq, int fd, int type,
		   irq_handler_t handler,
		   unsigned long irqflags, const char *devname,
		   void *dev_id)
{
	int err;

	if (fd != -1) {
		err = activate_fd(irq, fd, type, dev_id);
		if (err)
			return err;
	}

	return request_irq(irq, handler, irqflags, devname, dev_id);
}

EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);

/*
 * irq_chip must define (startup || enable) &&
 * (shutdown || disable) && end
 */
static void dummy(unsigned int irq)
{
}

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
	.typename = "SIGIO",
	.release = free_irq_by_irq_and_dev,
	.disable = dummy,
	.enable = dummy,
	.ack = dummy,
	.end = dummy
};

static struct irq_chip SIGVTALRM_irq_type = {
	.typename = "SIGVTALRM",
	.release = free_irq_by_irq_and_dev,
	.shutdown = dummy, /* never called */
	.disable = dummy,
	.enable = dummy,
	.ack = dummy,
	.end = dummy
};

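/*
 * init_IRQ assigns a chip to every IRQ descriptor - the timer IRQ
 * gets the SIGVTALRM type, everything else the SIGIO type.  Each
 * descriptor starts out disabled with no action and is then switched
 * on with enable_irq.
 */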
void __init init_IRQ(void)
{
	int i;

	irq_desc[TIMER_IRQ].status = IRQ_DISABLED;
	irq_desc[TIMER_IRQ].action = NULL;
	irq_desc[TIMER_IRQ].depth = 1;
	irq_desc[TIMER_IRQ].chip = &SIGVTALRM_irq_type;
	enable_irq(TIMER_IRQ);
	for (i = 1; i < NR_IRQS; i++) {
		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].action = NULL;
		irq_desc[i].depth = 1;
		irq_desc[i].chip = &normal_irq_type;
		enable_irq(i);
	}
}

/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation.  We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack.  The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other?  UML doesn't block
 * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
 * could arrive while a previous one is still setting up the
 * thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with pending_mask.  If the
 * value that comes back is zero, then there is no setup in progress,
 * and the interrupt can be handled.  If the value is non-zero, then
 * there is stack setup in progress.  In order to have the interrupt
 * handled, we leave our signal in the mask, and it will be handled by
 * the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one.  As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit).  This is the
 * nesting indicator.  If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */

static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in.  So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}

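/*
 * Setting pending_mask to 1 before the copy-back makes any nested
 * signal park its bit in pending_mask (see to_irq_stack) rather than
 * run on a stack that is being torn down; the accumulated bits, minus
 * our marker, are handed back to the caller.
 */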
unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}