/*
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel on a particular cpu:
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain.
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>

#include <asm/ptrace.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};
/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI"
 */
struct irq_info {
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short gsi;
			unsigned short vector;
			unsigned char flags;
		} pirq;
	} u;
};

#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)
static struct irq_info *irq_info;

static int *evtchn_to_irq;

struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};

static __initdata struct cpu_evtchn_s init_evtchn_mask = {
	.bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask;

static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)
static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;

/* Constructors for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
			.cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } };
}
/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}
static bool pirq_needs_eoi(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}
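/*
 * An event channel is "active" on a CPU when it is pending in the
 * shared info page, not masked there, and bound to that CPU in our
 * per-cpu binding mask.
 */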
static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}
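/* Record that event channel @chn is delivered to @cpu, both in the
 * per-cpu binding masks and in the irq's affinity. */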
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));

	__clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	__set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}
static void init_evtchn_cpu_bindings(void)
{
	int i;
	struct irq_desc *desc;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->affinity, cpumask_of(0));
	}

	/* Note: the size must cover the whole bits[] array, not just a
	 * pointer's worth. */
	memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
}
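/*
 * Helpers that operate directly on the event channel pending bitmap in
 * the shared info page.  They use sync_ bitops because the page is
 * shared with Xen and other (v)CPUs.
 */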
static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}
/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}
static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}
static int get_nr_hw_irqs(void)
{
	int ret = 1;

#ifdef CONFIG_X86_IO_APIC
	ret = get_nr_irqs_gsi();
#endif

	return ret;
}
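/*
 * Find an irq that is not yet bound to an event channel, searching
 * downwards from nr_irqs so that dynamically allocated irqs stay clear
 * of the hardware (GSI) range.
 */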
static int find_unbound_irq(void)
{
	struct irq_data *data;
	int irq, res;
	int start = get_nr_hw_irqs();

	if (start == nr_irqs)
		goto no_irqs;

	/* nr_irqs is a magic value. Must not use it.*/
	for (irq = nr_irqs-1; irq > start; irq--) {
		data = irq_get_irq_data(irq);
		/* only 0->15 have init'd desc; handle irq > 16 */
		if (!data)
			break;
		if (data->chip == &no_irq_chip)
			break;
		if (data->chip != &xen_dynamic_chip)
			continue;
		if (irq_info[irq].type == IRQT_UNBOUND)
			return irq;
	}

	if (irq == start)
		goto no_irqs;

	res = irq_alloc_desc_at(irq, 0);

	if (WARN_ON(res != irq))
		return -1;

	return irq;

no_irqs:
	panic("No available IRQ to bind to: increase nr_irqs!\n");
}
static bool identity_mapped_irq(unsigned irq)
{
	/* identity map all the hardware irqs */
	return irq < get_nr_hw_irqs();
}
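/* Tell Xen to send an explicit EOI for this pirq, if it needs one. */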
static void pirq_unmask_notify(int irq)
{
	struct physdev_eoi eoi = { .irq = irq };

	if (unlikely(pirq_needs_eoi(irq))) {
		int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}
static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = irq;
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}
static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}
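/*
 * Bind a physical IRQ to an event channel and unmask it.  Called the
 * first time the irq is started up; sharing is allowed unless the irq
 * is currently being probed.
 */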
static unsigned int startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = irq;
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq);

	return 0;
}
static void shutdown_pirq(unsigned int irq)
{
	struct evtchn_close close;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}
static void enable_pirq(unsigned int irq)
{
	startup_pirq(irq);
}

static void disable_pirq(unsigned int irq)
{
}

static void ack_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}

static void end_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(!desc))
		return;

	if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
	    (IRQ_DISABLED|IRQ_PENDING)) {
		shutdown_pirq(irq);
	} else if (VALID_EVTCHN(evtchn)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq);
	}
}
static int find_irq_by_gsi(unsigned gsi)
{
	int irq;

	for (irq = 0; irq < nr_irqs; irq++) {
		struct irq_info *info = info_for_irq(irq);

		if (info == NULL || info->type != IRQT_PIRQ)
			continue;

		if (gsi_from_irq(irq) == gsi)
			return irq;
	}

	return -1;
}
/* xen_allocate_pirq might allocate irqs from the top down, as a
 * consequence don't assume that the irq number returned has a low value
 * or can be used as a pirq number unless you know otherwise.
 *
 * One notable exception is when xen_allocate_pirq is called passing a
 * hardware gsi as argument, in that case the irq number returned
 * matches the gsi number passed as first argument.
 *
 * Note: We don't assign an event channel until the irq has actually
 * been started up.  Return an existing irq if we've already got one
 * for the gsi.
 */
int xen_allocate_pirq(unsigned gsi, int shareable, char *name)
{
	int irq;
	struct physdev_irq irq_op;

	spin_lock(&irq_mapping_update_lock);

	irq = find_irq_by_gsi(gsi);
	if (irq != -1) {
		printk(KERN_INFO "xen_allocate_pirq: returning irq %d for gsi %u\n",
		       irq, gsi);
		goto out;	/* XXX need refcount? */
	}

	/* If we are a PV guest, we don't have GSIs (no ACPI passed), so
	 * the !xen_initial_domain() check makes us take this
	 * identity-mapped path as well. */
	if (identity_mapped_irq(gsi) || !xen_initial_domain()) {
		irq = gsi;
		irq_alloc_desc_at(irq, 0);
	} else
		irq = find_unbound_irq();

	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
				      handle_level_irq, name);

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		irq_free_desc(irq);
		irq = -ENOSPC;
		goto out;
	}

	irq_info[irq] = mk_pirq_info(0, gsi, irq_op.vector);
	irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0;

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
int xen_destroy_irq(int irq)
{
	struct irq_desc *desc;
	int rc = -ENOENT;

	spin_lock(&irq_mapping_update_lock);

	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	irq_info[irq] = mk_unbound_info();

	irq_free_desc(irq);
	rc = 0;

out:
	spin_unlock(&irq_mapping_update_lock);

	return rc;
}
int xen_vector_from_irq(unsigned irq)
{
	return vector_from_irq(irq);
}

int xen_gsi_from_irq(unsigned irq)
{
	return gsi_from_irq(irq);
}
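/*
 * Map an inter-domain event channel to an irq, allocating a new irq
 * and installing the dynamic chip if this is the first binding for
 * that channel.
 */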
int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
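/*
 * Bind an IPI vector on the given cpu to an irq, creating the per-cpu
 * event channel via EVTCHNOP_bind_ipi on first use.
 */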
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
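/*
 * Bind a VIRQ (e.g. a timer) on the given cpu to an irq, creating the
 * per-cpu event channel via EVTCHNOP_bind_virq on first use.
 */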
static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
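/*
 * Tear down the binding for an irq: close the event channel, clear the
 * per-cpu VIRQ/IPI and evtchn_to_irq entries, and mark the irq unbound.
 */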
static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	if (irq_info[irq].type != IRQT_UNBOUND) {
		irq_info[irq] = mk_unbound_info();

		irq_free_desc(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}
int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
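/* Raise an IPI on a single cpu by notifying its bound event channel. */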
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];

	notify_remote_via_irq(irq);
}
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	unsigned long flags;
	int i;
	static DEFINE_SPINLOCK(debug_lock);

	spin_lock_irqsave(&debug_lock, flags);

	printk("vcpu %d\n  ", cpu);

	for_each_online_cpu(i) {
		struct vcpu_info *v = per_cpu(xen_vcpu, i);
		printk("%d: masked=%d pending=%d event_sel %08lx\n  ", i,
		       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
		       v->evtchn_upcall_pending,
		       v->evtchn_pending_sel);
	}
	printk("pending:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n   " : " ");
	printk("\nmasks:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nunmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			printk("  %d: event %d -> irq %d\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i]);
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}
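/*
 * Per-cpu count of nested upcalls; used below to fold re-entrant
 * upcalls into the already-running event loop instead of recursing.
 */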
static DEFINE_PER_CPU(unsigned, xed_nesting_count);

/*
 * Search the CPU's pending event bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
static void __xen_evtchn_do_upcall(void)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__get_cpu_var(xed_nesting_count)++)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];
				struct irq_desc *desc;

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}
			}
		}

		BUG_ON(!irqs_disabled());

		count = __get_cpu_var(xed_nesting_count);
		__get_cpu_var(xed_nesting_count) = 0;
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:
	put_cpu();
}
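/*
 * Entry points: xen_evtchn_do_upcall() handles the upcall arriving
 * with register state (the PV callback path), while
 * xen_hvm_evtchn_do_upcall() is invoked from the HVM callback vector.
 */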
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	exit_idle();
	irq_enter();

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_evtchn_info(evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	/* events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 */
	if (!VALID_EVTCHN(evtchn) ||
	    (xen_hvm_domain() && !xen_have_vector_callback))
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}
static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(irq, tcpu);
}
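/* Mark the channel pending again; if it was not already masked, unmask
 * it so the pending event is redelivered through the upcall path. */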
int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}
static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}
static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}
static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}
/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);
/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
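/*
 * Re-establish all event channel bindings after save/restore or
 * migration: Xen has discarded the old channels, so every VIRQ and IPI
 * must be re-bound and the mapping tables rebuilt.
 */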
void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}
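/*
 * The irq_chips below back the Xen irq types: the dynamic chip for
 * inter-domain event channels, the pirq chip for hardware interrupts
 * routed through Xen, and the per-cpu chip for VIRQs and IPIs.
 */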
static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name		= "xen-pirq",

	.startup	= startup_pirq,
	.shutdown	= shutdown_pirq,

	.enable		= enable_pirq,
	.unmask		= enable_pirq,

	.disable	= disable_pirq,
	.mask		= disable_pirq,

	.ack		= ack_pirq,
	.end		= end_pirq,

	.set_affinity	= set_affinity_irq,

	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name		= "xen-percpu",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
};
int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);
#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;

	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			printk(KERN_ERR "Request for Xen HVM callback vector"
					" failed.\n");
			xen_have_vector_callback = 0;
			return;
		}
		printk(KERN_INFO "Xen HVM callback vector for event delivery is "
				"enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
			alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif
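/*
 * Early init: allocate the per-cpu masks and mapping tables, mask every
 * event channel, and set up event delivery (the callback vector for HVM
 * guests, irq stacks otherwise).
 */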
void __init xen_init_IRQ(void)
{
	int i;

	cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
				    GFP_KERNEL);
	irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);

	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
				GFP_KERNEL);
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtchn_to_irq[i] = -1;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	if (xen_hvm_domain()) {
		xen_callback_vector();
		native_init_IRQ();
	} else {
		irq_ctx_init(smp_processor_id());
	}
}