Merge branch 'x86/urgent' into irq/sparseirq
author Thomas Gleixner <tglx@linutronix.de>
Tue, 12 Oct 2010 14:41:22 +0000 (16:41 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
Tue, 12 Oct 2010 14:41:26 +0000 (16:41 +0200)
Reason: Pull in the latest io_apic bugfixes

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
63 files changed:
Documentation/DocBook/genericirq.tmpl
MAINTAINERS
arch/arm/kernel/irq.c
arch/arm/mach-bcmring/irq.c
arch/m32r/kernel/irq.c
arch/m32r/platforms/m32104ut/setup.c
arch/m32r/platforms/m32700ut/setup.c
arch/m32r/platforms/mappi/setup.c
arch/m32r/platforms/mappi2/setup.c
arch/m32r/platforms/mappi3/setup.c
arch/m32r/platforms/oaks32r/setup.c
arch/m32r/platforms/opsput/setup.c
arch/m32r/platforms/usrv/setup.c
arch/sh/kernel/irq.c
arch/tile/kernel/irq.c
arch/um/kernel/irq.c
arch/x86/Kconfig
arch/x86/include/asm/apb_timer.h
arch/x86/include/asm/cpu.h
arch/x86/include/asm/irq_remapping.h
arch/x86/kernel/apb_timer.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/probe_64.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/machine_kexec_64.c
arch/x86/kernel/reboot.c
arch/x86/kernel/setup.c
arch/x86/kernel/setup_percpu.c
arch/x86/kernel/smpboot.c
arch/x86/kvm/lapic.c
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/mm/k8topology_64.c
arch/xtensa/kernel/irq.c
drivers/isdn/act2000/act2000.h
drivers/isdn/hisax/config.c
drivers/isdn/hisax/hisax.h
drivers/mfd/twl4030-irq.c
include/linux/irq.h
include/linux/irqdesc.h [new file with mode: 0644]
include/linux/irqnr.h
include/linux/lockdep.h
init/Kconfig
init/main.c
kernel/irq/Kconfig [new file with mode: 0644]
kernel/irq/Makefile
kernel/irq/autoprobe.c
kernel/irq/chip.c
kernel/irq/dummychip.c [new file with mode: 0644]
kernel/irq/handle.c
kernel/irq/internals.h
kernel/irq/irqdesc.c [new file with mode: 0644]
kernel/irq/manage.c
kernel/irq/migration.c
kernel/irq/numa_migrate.c
kernel/irq/proc.c
kernel/irq/resend.c
kernel/irq/spurious.c
kernel/softirq.c

index 1448b33fd22272e457e150234a877a54e5082571..fb10fd08c05cd00a49a4276f06842299e092b4a7 100644 (file)
@@ -28,7 +28,7 @@
   </authorgroup>
 
   <copyright>
-   <year>2005-2006</year>
+   <year>2005-2010</year>
    <holder>Thomas Gleixner</holder>
   </copyright>
   <copyright>
          <listitem><para>Edge type</para></listitem>
          <listitem><para>Simple type</para></listitem>
        </itemizedlist>
+       During the implementation we identified another type:
+       <itemizedlist>
+         <listitem><para>Fast EOI type</para></listitem>
+       </itemizedlist>
        In the SMP world of the __do_IRQ() super-handler another type
        was identified:
        <itemizedlist>
        is still available. This leads to a kind of duality for the time
        being. Over time the new model should be used in more and more
        architectures, as it enables smaller and cleaner IRQ subsystems.
+       It has been deprecated for three years and is about to be removed.
        </para>
   </chapter>
   <chapter id="bugs">
          <itemizedlist>
          <listitem><para>handle_level_irq</para></listitem>
          <listitem><para>handle_edge_irq</para></listitem>
+         <listitem><para>handle_fasteoi_irq</para></listitem>
          <listitem><para>handle_simple_irq</para></listitem>
          <listitem><para>handle_percpu_irq</para></listitem>
          </itemizedlist>
                are used by the default flow implementations.
                The following helper functions are implemented (simplified excerpt):
                <programlisting>
-default_enable(irq)
+default_enable(struct irq_data *data)
 {
-       desc->chip->unmask(irq);
+       desc->chip->irq_unmask(data);
 }
 
-default_disable(irq)
+default_disable(struct irq_data *data)
 {
-       if (!delay_disable(irq))
-               desc->chip->mask(irq);
+       if (!delay_disable(data))
+               desc->chip->irq_mask(data);
 }
 
-default_ack(irq)
+default_ack(struct irq_data *data)
 {
-       chip->ack(irq);
+       chip->irq_ack(data);
 }
 
-default_mask_ack(irq)
+default_mask_ack(struct irq_data *data)
 {
-       if (chip->mask_ack) {
-               chip->mask_ack(irq);
+       if (chip->irq_mask_ack) {
+               chip->irq_mask_ack(data);
        } else {
-               chip->mask(irq);
-               chip->ack(irq);
+               chip->irq_mask(data);
+               chip->irq_ack(data);
        }
 }
 
-noop(irq)
+noop(struct irq_data *data)
 {
 }
 
@@ -278,12 +284,27 @@ noop(irq)
                <para>
                The following control flow is implemented (simplified excerpt):
                <programlisting>
-desc->chip->start();
+desc->chip->irq_mask();
 handle_IRQ_event(desc->action);
-desc->chip->end();
+desc->chip->irq_unmask();
                </programlisting>
                </para>
-           </sect3>
+           </sect3>
+           <sect3 id="Default_FASTEOI_IRQ_flow_handler">
+               <title>Default Fast EOI IRQ flow handler</title>
+               <para>
+               handle_fasteoi_irq provides a generic implementation
+               for interrupts which only need an EOI at the end of
+               the handler.
+               </para>
+               <para>
+               The following control flow is implemented (simplified excerpt):
+               <programlisting>
+handle_IRQ_event(desc->action);
+desc->chip->irq_eoi();
+               </programlisting>
+               </para>
+           </sect3>
            <sect3 id="Default_Edge_IRQ_flow_handler">
                <title>Default Edge IRQ flow handler</title>
                <para>
@@ -294,20 +315,19 @@ desc->chip->end();
                The following control flow is implemented (simplified excerpt):
                <programlisting>
 if (desc->status &amp; running) {
-       desc->chip->hold();
+       desc->chip->irq_mask();
        desc->status |= pending | masked;
        return;
 }
-desc->chip->start();
+desc->chip->irq_ack();
 desc->status |= running;
 do {
        if (desc->status &amp; masked)
-               desc->chip->enable();
+               desc->chip->irq_unmask();
        desc->status &amp;= ~pending;
        handle_IRQ_event(desc->action);
 } while (status &amp; pending);
 desc->status &amp;= ~running;
-desc->chip->end();
                </programlisting>
                </para>
            </sect3>
@@ -342,9 +362,9 @@ handle_IRQ_event(desc->action);
                <para>
                The following control flow is implemented (simplified excerpt):
                <programlisting>
-desc->chip->start();
 handle_IRQ_event(desc->action);
-desc->chip->end();
+if (desc->chip->irq_eoi)
+        desc->chip->irq_eoi();
                </programlisting>
                </para>
            </sect3>
@@ -375,8 +395,7 @@ desc->chip->end();
        mechanism. (It's necessary to enable CONFIG_HARDIRQS_SW_RESEND when
        you want to use the delayed interrupt disable feature and your
        hardware is not capable of retriggering an interrupt.)
-       The delayed interrupt disable can be runtime enabled, per interrupt,
-       by setting the IRQ_DELAYED_DISABLE flag in the irq_desc status field.
+       The delayed interrupt disable is not configurable.
        </para>
        </sect2>
     </sect1>
@@ -387,13 +406,13 @@ desc->chip->end();
        contains all the direct chip relevant functions, which
        can be utilized by the irq flow implementations.
          <itemizedlist>
-         <listitem><para>ack()</para></listitem>
-         <listitem><para>mask_ack() - Optional, recommended for performance</para></listitem>
-         <listitem><para>mask()</para></listitem>
-         <listitem><para>unmask()</para></listitem>
-         <listitem><para>retrigger() - Optional</para></listitem>
-         <listitem><para>set_type() - Optional</para></listitem>
-         <listitem><para>set_wake() - Optional</para></listitem>
+         <listitem><para>irq_ack()</para></listitem>
+         <listitem><para>irq_mask_ack() - Optional, recommended for performance</para></listitem>
+         <listitem><para>irq_mask()</para></listitem>
+         <listitem><para>irq_unmask()</para></listitem>
+         <listitem><para>irq_retrigger() - Optional</para></listitem>
+         <listitem><para>irq_set_type() - Optional</para></listitem>
+         <listitem><para>irq_set_wake() - Optional</para></listitem>
          </itemizedlist>
        These primitives are strictly intended to mean what they say: ack means
        ACK, masking means masking of an IRQ line, etc. It is up to the flow
@@ -458,6 +477,7 @@ desc->chip->end();
      <para>
      This chapter contains the autogenerated documentation of the internal functions.
      </para>
+!Ikernel/irq/irqdesc.c
 !Ikernel/irq/handle.c
 !Ikernel/irq/chip.c
   </chapter>
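
The documentation changes above describe the new irq_data based chip callbacks and the fasteoi flow. As a rough driver-side illustration (not part of this commit): the foo_* names, register offsets and irq_base bookkeeping are invented for the sketch, while set_irq_chip_data(), set_irq_chip_and_handler(), irq_data_get_irq_chip_data() and handle_fasteoi_irq are the interfaces documented or introduced by this series.

/*
 * Illustrative sketch only -- not part of this commit.  A hypothetical
 * interrupt controller driver wired up to the irq_data based chip
 * callbacks and the fasteoi flow described in genericirq.tmpl above.
 */
#include <linux/irq.h>
#include <linux/io.h>

struct foo_intc {
        void __iomem    *base;          /* hypothetical register window */
        unsigned int    irq_base;       /* first Linux irq served by this chip */
};

#define FOO_MASK_REG    0x04
#define FOO_UNMASK_REG  0x08
#define FOO_EOI_REG     0x0c

static void foo_irq_mask(struct irq_data *data)
{
        struct foo_intc *intc = irq_data_get_irq_chip_data(data);

        writel(1 << (data->irq - intc->irq_base), intc->base + FOO_MASK_REG);
}

static void foo_irq_unmask(struct irq_data *data)
{
        struct foo_intc *intc = irq_data_get_irq_chip_data(data);

        writel(1 << (data->irq - intc->irq_base), intc->base + FOO_UNMASK_REG);
}

static void foo_irq_eoi(struct irq_data *data)
{
        struct foo_intc *intc = irq_data_get_irq_chip_data(data);

        writel(1 << (data->irq - intc->irq_base), intc->base + FOO_EOI_REG);
}

static struct irq_chip foo_irq_chip = {
        .name           = "FOO-INTC",
        .irq_mask       = foo_irq_mask,
        .irq_unmask     = foo_irq_unmask,
        .irq_eoi        = foo_irq_eoi,
};

static void foo_intc_map_irq(unsigned int irq, struct foo_intc *intc)
{
        set_irq_chip_data(irq, intc);
        /* fasteoi flow: handle_IRQ_event() followed by chip->irq_eoi() */
        set_irq_chip_and_handler(irq, &foo_irq_chip, handle_fasteoi_irq);
}
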
index 668682d1f5fa23f296b23c87efa1a8c024f3a62a..62c0acec298d64587296ad5db250ff747cb6992a 100644 (file)
@@ -3210,6 +3210,12 @@ F:       drivers/net/irda/
 F:     include/net/irda/
 F:     net/irda/
 
+IRQ SUBSYSTEM
+M:     Thomas Gleixner <tglx@linutronix.de>
+S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git irq/core
+F:     kernel/irq/
+
 ISAPNP
 M:     Jaroslav Kysela <perex@perex.cz>
 S:     Maintained
index c0d5c3b3a760fa79624657ebc495b1d7b81b8c66..5456d11d6ae4805e75ba9cbf0b87422b147795f6 100644 (file)
@@ -157,10 +157,8 @@ void __init init_IRQ(void)
        struct irq_desc *desc;
        int irq;
 
-       for (irq = 0; irq < nr_irqs; irq++) {
-               desc = irq_to_desc_alloc_node(irq, 0);
+       for (irq = 0; irq < nr_irqs; irq++)
                desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
-       }
 
        init_arch_irq();
 }
@@ -169,7 +167,7 @@ void __init init_IRQ(void)
 int __init arch_probe_nr_irqs(void)
 {
        nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS;
-       return 0;
+       return nr_irqs;
 }
 #endif
 
index dc1c4939b0cedc6a03ad5cb7b8e9ba1e5e3f4dad..e3152631eb377e33490536729ae3656bf6575f24 100644 (file)
@@ -67,21 +67,21 @@ static void bcmring_unmask_irq2(unsigned int irq)
 }
 
 static struct irq_chip bcmring_irq0_chip = {
-       .typename = "ARM-INTC0",
+       .name = "ARM-INTC0",
        .ack = bcmring_mask_irq0,
        .mask = bcmring_mask_irq0,      /* mask a specific interrupt, blocking its delivery. */
        .unmask = bcmring_unmask_irq0,  /* unmasks an interrupt */
 };
 
 static struct irq_chip bcmring_irq1_chip = {
-       .typename = "ARM-INTC1",
+       .name = "ARM-INTC1",
        .ack = bcmring_mask_irq1,
        .mask = bcmring_mask_irq1,
        .unmask = bcmring_unmask_irq1,
 };
 
 static struct irq_chip bcmring_irq2_chip = {
-       .typename = "ARM-SINTC",
+       .name = "ARM-SINTC",
        .ack = bcmring_mask_irq2,
        .mask = bcmring_mask_irq2,
        .unmask = bcmring_unmask_irq2,
index 3c71f776872c51842a7959a911182b3c4afa3353..7db26f1f082d097f24ffa51da083694e05f47a5a 100644 (file)
@@ -51,7 +51,7 @@ int show_interrupts(struct seq_file *p, void *v)
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-               seq_printf(p, " %14s", irq_desc[i].chip->typename);
+               seq_printf(p, " %14s", irq_desc[i].chip->name);
                seq_printf(p, "  %s", action->name);
 
                for (action=action->next; action; action = action->next)
index 922fdfdadeaa220830d52a99f97a1cbe22e7ebd1..402a59d7219b6f368b38b4c9425ed79fa4279539 100644 (file)
@@ -65,7 +65,7 @@ static void shutdown_m32104ut_irq(unsigned int irq)
 
 static struct irq_chip m32104ut_irq_type =
 {
-       .typename = "M32104UT-IRQ",
+       .name = "M32104UT-IRQ",
        .startup = startup_m32104ut_irq,
        .shutdown = shutdown_m32104ut_irq,
        .enable = enable_m32104ut_irq,
index 9c1bc7487c1e652ff6bd9cb77ea79c5cdb989caf..80b1a026795aec2b8c8dae642143b4f5424538da 100644 (file)
@@ -71,7 +71,7 @@ static void shutdown_m32700ut_irq(unsigned int irq)
 
 static struct irq_chip m32700ut_irq_type =
 {
-       .typename = "M32700UT-IRQ",
+       .name = "M32700UT-IRQ",
        .startup = startup_m32700ut_irq,
        .shutdown = shutdown_m32700ut_irq,
        .enable = enable_m32700ut_irq,
@@ -148,7 +148,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq)
 
 static struct irq_chip m32700ut_pld_irq_type =
 {
-       .typename = "M32700UT-PLD-IRQ",
+       .name = "M32700UT-PLD-IRQ",
        .startup = startup_m32700ut_pld_irq,
        .shutdown = shutdown_m32700ut_pld_irq,
        .enable = enable_m32700ut_pld_irq,
@@ -217,7 +217,7 @@ static void shutdown_m32700ut_lanpld_irq(unsigned int irq)
 
 static struct irq_chip m32700ut_lanpld_irq_type =
 {
-       .typename = "M32700UT-PLD-LAN-IRQ",
+       .name = "M32700UT-PLD-LAN-IRQ",
        .startup = startup_m32700ut_lanpld_irq,
        .shutdown = shutdown_m32700ut_lanpld_irq,
        .enable = enable_m32700ut_lanpld_irq,
@@ -286,7 +286,7 @@ static void shutdown_m32700ut_lcdpld_irq(unsigned int irq)
 
 static struct irq_chip m32700ut_lcdpld_irq_type =
 {
-       .typename = "M32700UT-PLD-LCD-IRQ",
+       .name = "M32700UT-PLD-LCD-IRQ",
        .startup = startup_m32700ut_lcdpld_irq,
        .shutdown = shutdown_m32700ut_lcdpld_irq,
        .enable = enable_m32700ut_lcdpld_irq,
index fb4b17799b66fd090b04f4d02364044f5558123d..ea00c84d6b1b2bb665bf0e88cf8ed885b2f96385 100644 (file)
@@ -65,7 +65,7 @@ static void shutdown_mappi_irq(unsigned int irq)
 
 static struct irq_chip mappi_irq_type =
 {
-       .typename = "MAPPI-IRQ",
+       .name = "MAPPI-IRQ",
        .startup = startup_mappi_irq,
        .shutdown = shutdown_mappi_irq,
        .enable = enable_mappi_irq,
index 6a65eda0a056c67ef039a34cc5d0326251be14d5..c049376d0270064869450bb56948a4edd57c068e 100644 (file)
@@ -72,7 +72,7 @@ static void shutdown_mappi2_irq(unsigned int irq)
 
 static struct irq_chip mappi2_irq_type =
 {
-       .typename = "MAPPI2-IRQ",
+       .name = "MAPPI2-IRQ",
        .startup = startup_mappi2_irq,
        .shutdown = shutdown_mappi2_irq,
        .enable = enable_mappi2_irq,
index 9c337aeac94b7db1a1d3d0f9307ef2116e15c687..882de25c6e8cc7d8f6bc9a55ef9a4c422fefcab2 100644 (file)
@@ -72,7 +72,7 @@ static void shutdown_mappi3_irq(unsigned int irq)
 
 static struct irq_chip mappi3_irq_type =
 {
-       .typename = "MAPPI3-IRQ",
+       .name = "MAPPI3-IRQ",
        .startup = startup_mappi3_irq,
        .shutdown = shutdown_mappi3_irq,
        .enable = enable_mappi3_irq,
index ed865741c38df1c6d9702e94bb17f033fdcca5c2..d11d93bf74f52bd4b14baea3e6fc3da16d83b271 100644 (file)
@@ -63,7 +63,7 @@ static void shutdown_oaks32r_irq(unsigned int irq)
 
 static struct irq_chip oaks32r_irq_type =
 {
-       .typename = "OAKS32R-IRQ",
+       .name = "OAKS32R-IRQ",
        .startup = startup_oaks32r_irq,
        .shutdown = shutdown_oaks32r_irq,
        .enable = enable_oaks32r_irq,
index 80d68065701963dff6107f6377bb7fc1c3416707..5f3402a2fbafe3c67cd549ac9c7335ff27aa3e1a 100644 (file)
@@ -72,7 +72,7 @@ static void shutdown_opsput_irq(unsigned int irq)
 
 static struct irq_chip opsput_irq_type =
 {
-       .typename = "OPSPUT-IRQ",
+       .name = "OPSPUT-IRQ",
        .startup = startup_opsput_irq,
        .shutdown = shutdown_opsput_irq,
        .enable = enable_opsput_irq,
@@ -149,7 +149,7 @@ static void shutdown_opsput_pld_irq(unsigned int irq)
 
 static struct irq_chip opsput_pld_irq_type =
 {
-       .typename = "OPSPUT-PLD-IRQ",
+       .name = "OPSPUT-PLD-IRQ",
        .startup = startup_opsput_pld_irq,
        .shutdown = shutdown_opsput_pld_irq,
        .enable = enable_opsput_pld_irq,
@@ -218,7 +218,7 @@ static void shutdown_opsput_lanpld_irq(unsigned int irq)
 
 static struct irq_chip opsput_lanpld_irq_type =
 {
-       .typename = "OPSPUT-PLD-LAN-IRQ",
+       .name = "OPSPUT-PLD-LAN-IRQ",
        .startup = startup_opsput_lanpld_irq,
        .shutdown = shutdown_opsput_lanpld_irq,
        .enable = enable_opsput_lanpld_irq,
index 757302660af84f2f09ffbb0e48fcf946ab27cd45..1beac7a51ed432ea386610b8b1bbe3132e04d6b7 100644 (file)
@@ -63,7 +63,7 @@ static void shutdown_mappi_irq(unsigned int irq)
 
 static struct irq_chip mappi_irq_type =
 {
-       .typename = "M32700-IRQ",
+       .name = "M32700-IRQ",
        .startup = startup_mappi_irq,
        .shutdown = shutdown_mappi_irq,
        .enable = enable_mappi_irq,
@@ -136,7 +136,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq)
 
 static struct irq_chip m32700ut_pld_irq_type =
 {
-       .typename = "USRV-PLD-IRQ",
+       .name = "USRV-PLD-IRQ",
        .startup = startup_m32700ut_pld_irq,
        .shutdown = shutdown_m32700ut_pld_irq,
        .enable = enable_m32700ut_pld_irq,
index 257de1f0692b19ade90daefcf1089c3c127ce72c..ae5bac39b896d052e0b3b1b663e3edb69a754339 100644 (file)
@@ -290,7 +290,7 @@ void __init init_IRQ(void)
 int __init arch_probe_nr_irqs(void)
 {
        nr_irqs = sh_mv.mv_nr_irqs;
-       return 0;
+       return NR_IRQS_LEGACY;
 }
 #endif
 
index 596c600869305ade6f13025a18a20039193ddd64..9a27d563fc30968ce3efb46ac11ce241e10d8cc0 100644 (file)
@@ -208,7 +208,7 @@ static void tile_irq_chip_eoi(unsigned int irq)
 }
 
 static struct irq_chip tile_irq_chip = {
-       .typename = "tile_irq_chip",
+       .name = "tile_irq_chip",
        .ack = tile_irq_chip_ack,
        .eoi = tile_irq_chip_eoi,
        .mask = tile_irq_chip_mask,
@@ -288,7 +288,7 @@ int show_interrupts(struct seq_file *p, void *v)
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-               seq_printf(p, " %14s", irq_desc[i].chip->typename);
+               seq_printf(p, " %14s", irq_desc[i].chip->name);
                seq_printf(p, "  %s", action->name);
 
                for (action = action->next; action; action = action->next)
index a3f0b04d7101ccb1fea64254339328fbec12412f..a746e3037a5bc896462affb2c62ab493a625cc58 100644 (file)
@@ -46,7 +46,7 @@ int show_interrupts(struct seq_file *p, void *v)
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-               seq_printf(p, " %14s", irq_desc[i].chip->typename);
+               seq_printf(p, " %14s", irq_desc[i].chip->name);
                seq_printf(p, "  %s", action->name);
 
                for (action=action->next; action; action = action->next)
@@ -369,7 +369,7 @@ static void dummy(unsigned int irq)
 
 /* This is used for everything else than the timer. */
 static struct irq_chip normal_irq_type = {
-       .typename = "SIGIO",
+       .name = "SIGIO",
        .release = free_irq_by_irq_and_dev,
        .disable = dummy,
        .enable = dummy,
@@ -378,7 +378,7 @@ static struct irq_chip normal_irq_type = {
 };
 
 static struct irq_chip SIGVTALRM_irq_type = {
-       .typename = "SIGVTALRM",
+       .name = "SIGVTALRM",
        .release = free_irq_by_irq_and_dev,
        .shutdown = dummy, /* never called */
        .disable = dummy,
index cea0cd9a316fb987bfa611a1dffa06cdba1f0332..3ec657f7ee70303fc669ad38858b5ee7589a0544 100644 (file)
@@ -59,6 +59,11 @@ config X86
        select ANON_INODES
        select HAVE_ARCH_KMEMCHECK
        select HAVE_USER_RETURN_NOTIFIER
+       select HAVE_GENERIC_HARDIRQS
+       select HAVE_SPARSE_IRQ
+       select NUMA_IRQ_DESC if (SPARSE_IRQ && NUMA)
+       select GENERIC_IRQ_PROBE
+       select GENERIC_PENDING_IRQ if SMP
 
 config INSTRUCTION_DECODER
        def_bool (KPROBES || PERF_EVENTS)
@@ -200,20 +205,6 @@ config HAVE_INTEL_TXT
        def_bool y
        depends on EXPERIMENTAL && DMAR && ACPI
 
-# Use the generic interrupt handling code in kernel/irq/:
-config GENERIC_HARDIRQS
-       def_bool y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
-       def_bool y
-
-config GENERIC_IRQ_PROBE
-       def_bool y
-
-config GENERIC_PENDING_IRQ
-       def_bool y
-       depends on GENERIC_HARDIRQS && SMP
-
 config USE_GENERIC_SMP_HELPERS
        def_bool y
        depends on SMP
@@ -296,23 +287,6 @@ config X86_X2APIC
 
          If you don't know what to do here, say N.
 
-config SPARSE_IRQ
-       bool "Support sparse irq numbering"
-       depends on PCI_MSI || HT_IRQ
-       ---help---
-         This enables support for sparse irqs. This is useful for distro
-         kernels that want to define a high CONFIG_NR_CPUS value but still
-         want to have low kernel memory footprint on smaller machines.
-
-         ( Sparse IRQs can also be beneficial on NUMA boxes, as they spread
-           out the irq_desc[] array in a more NUMA-friendly way. )
-
-         If you don't know what to do here, say N.
-
-config NUMA_IRQ_DESC
-       def_bool y
-       depends on SPARSE_IRQ && NUMA
-
 config X86_MPPARSE
        bool "Enable MPS table" if ACPI
        default y
index a69b1ac9eaf82d639fd0ae51459d2dfdc79fbd1b..2fefa501d3ba64ee5db2e3541555a28ebe27598e 100644 (file)
@@ -54,7 +54,6 @@ extern struct clock_event_device *global_clock_event;
 extern unsigned long apbt_quick_calibrate(void);
 extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu);
 extern void apbt_setup_secondary_clock(void);
-extern unsigned int boot_cpu_id;
 
 extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint);
 extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr);
index b185091bf19ce39f67a325ecd6aafb7c13f8cd23..4fab24de26b18404069994b908c79d3e4e481c50 100644 (file)
@@ -32,6 +32,5 @@ extern void arch_unregister_cpu(int);
 
 DECLARE_PER_CPU(int, cpu_state);
 
-extern unsigned int boot_cpu_id;
 
 #endif /* _ASM_X86_CPU_H */
index f275e2244505b98308ca72e66e26c250d29c61a8..8d841505344e1009e6e7a7ff685ab72242a68e54 100644 (file)
@@ -3,4 +3,31 @@
 
 #define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
 
+#ifdef CONFIG_INTR_REMAP
+static inline void prepare_irte(struct irte *irte, int vector,
+                               unsigned int dest)
+{
+       memset(irte, 0, sizeof(*irte));
+
+       irte->present = 1;
+       irte->dst_mode = apic->irq_dest_mode;
+       /*
+        * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
+        * actual level or edge trigger will be setup in the IO-APIC
+        * RTE. This will help simplify level triggered irq migration.
+        * For more details, see the comments (in io_apic.c) explaining IO-APIC
+        * irq migration in the presence of interrupt-remapping.
+        */
+       irte->trigger_mode = 0;
+       irte->dlvry_mode = apic->irq_delivery_mode;
+       irte->vector = vector;
+       irte->dest_id = IRTE_DEST(dest);
+       irte->redir_hint = 1;
+}
+#else
+static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
+{
+}
+#endif
+
 #endif /* _ASM_X86_IRQ_REMAPPING_H */
index 8dd77800ff5d7b444742ae02cc5168ccf8855fcd..08f75fb4f50976c4f574b1d40a86b65afc327064 100644 (file)
@@ -343,7 +343,7 @@ void apbt_setup_secondary_clock(void)
 
        /* Don't register boot CPU clockevent */
        cpu = smp_processor_id();
-       if (cpu == boot_cpu_id)
+       if (!cpu)
                return;
        /*
         * We need to calculate the scaled math multiplication factor for
index e3b534cda49a8097dde55400083d7eeb8f9c694c..8cf86fb3b4e38a122f6bfd480e3faaf08466aaa9 100644 (file)
@@ -1665,10 +1665,7 @@ int __init APIC_init_uniprocessor(void)
        }
 #endif
 
-#ifndef CONFIG_SMP
-       enable_IR_x2apic();
        default_setup_apic_routing();
-#endif
 
        verify_local_APIC();
        connect_bsp_APIC();
index 5c5b8f3dddb58686ba4afc8b314237c0c318370b..7556eb7a1a47edf4d4e69e40ecd649296e388374 100644 (file)
@@ -162,7 +162,7 @@ int __init arch_early_irq_init(void)
 
        cfg = irq_cfgx;
        count = ARRAY_SIZE(irq_cfgx);
-       node= cpu_to_node(boot_cpu_id);
+       node = cpu_to_node(0);
 
        for (i = 0; i < count; i++) {
                desc = irq_to_desc(i);
@@ -1382,21 +1382,7 @@ int setup_ioapic_entry(int apic_id, int irq,
                if (index < 0)
                        panic("Failed to allocate IRTE for ioapic %d\n", apic_id);
 
-               memset(&irte, 0, sizeof(irte));
-
-               irte.present = 1;
-               irte.dst_mode = apic->irq_dest_mode;
-               /*
-                * Trigger mode in the IRTE will always be edge, and the
-                * actual level or edge trigger will be setup in the IO-APIC
-                * RTE. This will help simplify level triggered irq migration.
-                * For more details, see the comments above explainig IO-APIC
-                * irq migration in the presence of interrupt-remapping.
-                */
-               irte.trigger_mode = 0;
-               irte.dlvry_mode = apic->irq_delivery_mode;
-               irte.vector = vector;
-               irte.dest_id = IRTE_DEST(destination);
+               prepare_irte(&irte, vector, destination);
 
                /* Set source-id of interrupt request */
                set_ioapic_sid(&irte, apic_id);
@@ -1488,7 +1474,7 @@ static void __init setup_IO_APIC_irqs(void)
        int notcon = 0;
        struct irq_desc *desc;
        struct irq_cfg *cfg;
-       int node = cpu_to_node(boot_cpu_id);
+       int node = cpu_to_node(0);
 
        apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
 
@@ -1553,7 +1539,7 @@ static void __init setup_IO_APIC_irqs(void)
 void setup_IO_APIC_irq_extra(u32 gsi)
 {
        int apic_id = 0, pin, idx, irq;
-       int node = cpu_to_node(boot_cpu_id);
+       int node = cpu_to_node(0);
        struct irq_desc *desc;
        struct irq_cfg *cfg;
 
@@ -2932,7 +2918,7 @@ static inline void __init check_timer(void)
 {
        struct irq_desc *desc = irq_to_desc(0);
        struct irq_cfg *cfg = desc->chip_data;
-       int node = cpu_to_node(boot_cpu_id);
+       int node = cpu_to_node(0);
        int apic1, pin1, apic2, pin2;
        unsigned long flags;
        int no_pin1 = 0;
@@ -3286,7 +3272,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
 
 int create_irq(void)
 {
-       int node = cpu_to_node(boot_cpu_id);
+       int node = cpu_to_node(0);
        unsigned int irq_want;
        int irq;
 
@@ -3340,14 +3326,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
                ir_index = map_irq_to_irte_handle(irq, &sub_handle);
                BUG_ON(ir_index == -1);
 
-               memset (&irte, 0, sizeof(irte));
-
-               irte.present = 1;
-               irte.dst_mode = apic->irq_dest_mode;
-               irte.trigger_mode = 0; /* edge */
-               irte.dlvry_mode = apic->irq_delivery_mode;
-               irte.vector = cfg->vector;
-               irte.dest_id = IRTE_DEST(dest);
+               prepare_irte(&irte, cfg->vector, dest);
 
                /* Set source-id of interrupt request */
                if (pdev)
@@ -3885,7 +3864,7 @@ int __init arch_probe_nr_irqs(void)
        if (nr < nr_irqs)
                nr_irqs = nr;
 
-       return 0;
+       return NR_IRQS_LEGACY;
 }
 #endif
 
@@ -3908,7 +3887,7 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq,
        if (dev)
                node = dev_to_node(dev);
        else
-               node = cpu_to_node(boot_cpu_id);
+               node = cpu_to_node(0);
 
        desc = irq_to_desc_alloc_node(irq, node);
        if (!desc) {
index 83e9be4778e2b597791306a85ca9ca527003e2c2..f9e4e6a54073e3d901d0475da9c5deceb21d55d8 100644 (file)
@@ -54,6 +54,9 @@ static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
  */
 void __init default_setup_apic_routing(void)
 {
+
+       enable_IR_x2apic();
+
 #ifdef CONFIG_X86_X2APIC
        if (x2apic_mode
 #ifdef CONFIG_X86_UV
index ba5f62f45f01e136e849894076917684ffcd8c40..a8b4d91b8394d5cd6d9ebc19106f9fa426330b78 100644 (file)
@@ -148,7 +148,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
        /* calling is from identify_secondary_cpu() ? */
-       if (c->cpu_index == boot_cpu_id)
+       if (!c->cpu_index)
                return;
 
        /*
index f2f9ac7da25ccfba6d5ba7ea44b63899ba672e97..15c671385f593b99064c1e98cfdba75e7e4a34fa 100644 (file)
@@ -665,7 +665,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
                this_cpu->c_early_init(c);
 
 #ifdef CONFIG_SMP
-       c->cpu_index = boot_cpu_id;
+       c->cpu_index = 0;
 #endif
        filter_cpuid_features(c, false);
 }
index b4389441efbbd8e791289aa936369e5e73917c18..695f17731e2382ce63bbbb4a9caaf2ec7e16e29e 100644 (file)
@@ -170,7 +170,7 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
        /* calling is from identify_secondary_cpu() ? */
-       if (c->cpu_index == boot_cpu_id)
+       if (!c->cpu_index)
                return;
 
        /*
index ebdb85cf2686fa36702cd4d50b657f22de85b3bd..76b8cd953deed9f8a50d572cdc52b5edb68bc3b7 100644 (file)
@@ -96,7 +96,6 @@ static void __init nvidia_bugs(int num, int slot, int func)
 
 }
 
-#if defined(CONFIG_ACPI) && defined(CONFIG_X86_IO_APIC)
 #if defined(CONFIG_ACPI) && defined(CONFIG_X86_IO_APIC)
 static u32 __init ati_ixp4x0_rev(int num, int slot, int func)
 {
@@ -115,7 +114,6 @@ static u32 __init ati_ixp4x0_rev(int num, int slot, int func)
        d &= 0xff;
        return d;
 }
-#endif
 
 static void __init ati_bugs(int num, int slot, int func)
 {
index 035c8c529181fa351c042f8d6a5b8ec3240dec8f..b3ea9db39db6f7ee9f9dab00632f754d8a75827d 100644 (file)
@@ -36,7 +36,7 @@ static int init_one_level2_page(struct kimage *image, pgd_t *pgd,
                if (!page)
                        goto out;
                pud = (pud_t *)page_address(page);
-               memset(pud, 0, PAGE_SIZE);
+               clear_page(pud);
                set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
        }
        pud = pud_offset(pgd, addr);
@@ -45,7 +45,7 @@ static int init_one_level2_page(struct kimage *image, pgd_t *pgd,
                if (!page)
                        goto out;
                pmd = (pmd_t *)page_address(page);
-               memset(pmd, 0, PAGE_SIZE);
+               clear_page(pmd);
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
        }
        pmd = pmd_offset(pud, addr);
index e3af342fe83ae7a57b8f6db6de4fb9541c4ab15e..7a4cf14223ba3288384909657d49edff7371623e 100644 (file)
@@ -84,7 +84,7 @@ static int __init reboot_setup(char *str)
                        }
                                /* we will leave sorting out the final value
                                   when we are ready to reboot, since we might not
-                                  have set up boot_cpu_id or smp_num_cpu */
+                                  have detected BSP APIC ID or smp_num_cpu */
                        break;
 #endif /* CONFIG_SMP */
 
index c3a4fbb2b996d00277d6523cb76b74e2c5944621..7d5ee08c982dc588e5f4c7c3e16c033d2244dcd4 100644 (file)
@@ -125,7 +125,6 @@ unsigned long max_pfn_mapped;
 RESERVE_BRK(dmi_alloc, 65536);
 #endif
 
-unsigned int boot_cpu_id __read_mostly;
 
 static __initdata unsigned long _brk_start = (unsigned long)__brk_base;
 unsigned long _brk_end = (unsigned long)__brk_base;
index a60df9ae645440789181acd318e12bd4c450aeff..2335c15c93a4c514a0ea7d42e08b7874c1cd28d9 100644 (file)
@@ -253,7 +253,7 @@ void __init setup_per_cpu_areas(void)
                 * Up to this point, the boot CPU has been using .init.data
                 * area.  Reload any changed state for the boot CPU.
                 */
-               if (cpu == boot_cpu_id)
+               if (!cpu)
                        switch_to_new_gdt(cpu);
        }
 
index 8b3bfc4dd70872680ff4b451a8b03903bd68727b..87a8c6b00f8d90cac4d889c5aab6aff19b05ee05 100644 (file)
@@ -1109,8 +1109,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
        }
        set_cpu_sibling_map(0);
 
-       enable_IR_x2apic();
-       default_setup_apic_routing();
 
        if (smp_sanity_check(max_cpus) < 0) {
                printk(KERN_INFO "SMP disabled\n");
@@ -1118,6 +1116,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
                goto out;
        }
 
+       default_setup_apic_routing();
+
        preempt_disable();
        if (read_apic_id() != boot_cpu_physical_apicid) {
                panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
index 77d8c0f4817d5f10f88e725ad49b22642c1172c5..22b06f7660f4f44459ef62568659f745d2d8dc83 100644 (file)
@@ -1056,14 +1056,13 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
 
        vcpu->arch.apic = apic;
 
-       apic->regs_page = alloc_page(GFP_KERNEL);
+       apic->regs_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
        if (apic->regs_page == NULL) {
                printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
                       vcpu->vcpu_id);
                goto nomem_free_apic;
        }
        apic->regs = page_address(apic->regs_page);
-       memset(apic->regs, 0, PAGE_SIZE);
        apic->vcpu = vcpu;
 
        hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
index bca79091b9d6158bcab97bdba2d8b767babeea46..558f2d33207636a54abaf0f024bf0bf942879044 100644 (file)
@@ -67,7 +67,7 @@ static __init void *alloc_low_page(void)
                panic("alloc_low_page: ran out of memory");
 
        adr = __va(pfn * PAGE_SIZE);
-       memset(adr, 0, PAGE_SIZE);
+       clear_page(adr);
        return adr;
 }
 
@@ -558,7 +558,7 @@ char swsusp_pg_dir[PAGE_SIZE]
 
 static inline void save_pg_dir(void)
 {
-       memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
+       copy_page(swsusp_pg_dir, swapper_pg_dir);
 }
 #else /* !CONFIG_ACPI_SLEEP */
 static inline void save_pg_dir(void)
index 9a6674689a20f8e491f0a0f845079de9febaac5f..7c48ad4faca312c2f3e80b2971bb29b774994522 100644 (file)
@@ -293,7 +293,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
                panic("alloc_low_page: ran out of memory");
 
        adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
-       memset(adr, 0, PAGE_SIZE);
+       clear_page(adr);
        *phys  = pfn * PAGE_SIZE;
        return adr;
 }
index 970ed579d4e4e86c6265828335b35e35024cfc38..240f86462a83c5bd32885467fe219a96104e24c7 100644 (file)
@@ -54,8 +54,8 @@ static __init int find_northbridge(void)
 static __init void early_get_boot_cpu_id(void)
 {
        /*
-        * need to get boot_cpu_id so can use that to create apicid_to_node
-        * in k8_scan_nodes()
+        * need to get the APIC ID of the BSP so can use that to
+        * create apicid_to_node in k8_scan_nodes()
         */
 #ifdef CONFIG_X86_MPPARSE
        /*
@@ -212,7 +212,7 @@ int __init k8_scan_nodes(void)
        bits = boot_cpu_data.x86_coreid_bits;
        cores = (1<<bits);
        apicid_base = 0;
-       /* need to get boot_cpu_id early for system with apicid lifting */
+       /* get the APIC ID of the BSP early for systems with apicid lifting */
        early_get_boot_cpu_id();
        if (boot_cpu_physical_apicid > 0) {
                pr_info("BSP APIC ID: %02x\n", boot_cpu_physical_apicid);
index c64a5d387de51d9e0e4c2225a8ae348c05b331e8..87508886cbbdadb28d2843047cdbbb90e7656b77 100644 (file)
@@ -92,7 +92,7 @@ int show_interrupts(struct seq_file *p, void *v)
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-               seq_printf(p, " %14s", irq_desc[i].chip->typename);
+               seq_printf(p, " %14s", irq_desc[i].chip->name);
                seq_printf(p, "  %s", action->name);
 
                for (action=action->next; action; action = action->next)
index d4c50512a1ffc038795acb06248e1b245f63f3a5..88c9423500d838a1748c74b283d9b612a754d7fd 100644 (file)
@@ -141,9 +141,9 @@ typedef struct irq_data_isa {
        __u8            rcvhdr[8];
 } irq_data_isa;
 
-typedef union irq_data {
+typedef union act2000_irq_data {
        irq_data_isa isa;
-} irq_data;
+} act2000_irq_data;
 
 /*
  * Per card driver data
@@ -176,7 +176,7 @@ typedef struct act2000_card {
        char   *status_buf_read;
        char   *status_buf_write;
        char   *status_buf_end;
-       irq_data idat;                  /* Data used for IRQ handler        */
+       act2000_irq_data idat;          /* Data used for IRQ handler        */
        isdn_if interface;              /* Interface to upper layer         */
        char regname[35];               /* Name used for request_region     */
 } act2000_card;
index 6f9afcd5ca4e9301a07c8ec5a4c0473e255363c3..b133378d4dc9b1707c749102c345c3c2f7b2d7d4 100644 (file)
@@ -801,6 +801,16 @@ static void closecard(int cardnr)
        ll_unload(csta);
 }
 
+static irqreturn_t card_irq(int intno, void *dev_id)
+{
+       struct IsdnCardState *cs = dev_id;
+       irqreturn_t ret = cs->irq_func(intno, cs);
+
+       if (ret == IRQ_HANDLED)
+               cs->irq_cnt++;
+       return ret;
+}
+
 static int init_card(struct IsdnCardState *cs)
 {
        int     irq_cnt, cnt = 3, ret;
@@ -809,10 +819,10 @@ static int init_card(struct IsdnCardState *cs)
                ret = cs->cardmsg(cs, CARD_INIT, NULL);
                return(ret);
        }
-       irq_cnt = kstat_irqs(cs->irq);
+       irq_cnt = cs->irq_cnt = 0;
        printk(KERN_INFO "%s: IRQ %d count %d\n", CardType[cs->typ],
               cs->irq, irq_cnt);
-       if (request_irq(cs->irq, cs->irq_func, cs->irq_flags, "HiSax", cs)) {
+       if (request_irq(cs->irq, card_irq, cs->irq_flags, "HiSax", cs)) {
                printk(KERN_WARNING "HiSax: couldn't get interrupt %d\n",
                       cs->irq);
                return 1;
@@ -822,8 +832,8 @@ static int init_card(struct IsdnCardState *cs)
                /* Timeout 10ms */
                msleep(10);
                printk(KERN_INFO "%s: IRQ %d count %d\n",
-                      CardType[cs->typ], cs->irq, kstat_irqs(cs->irq));
-               if (kstat_irqs(cs->irq) == irq_cnt) {
+                      CardType[cs->typ], cs->irq, cs->irq_cnt);
+               if (cs->irq_cnt == irq_cnt) {
                        printk(KERN_WARNING
                               "%s: IRQ(%d) getting no interrupts during init %d\n",
                               CardType[cs->typ], cs->irq, 4 - cnt);
index 832a87855ffb5900fa22ded56b5586ed907ed886..32ab3924aa7341f5f390623faee36d9a308abc67 100644 (file)
@@ -959,6 +959,7 @@ struct IsdnCardState {
        u_long          event;
        struct work_struct tqueue;
        struct timer_list dbusytimer;
+       unsigned int    irq_cnt;
 #ifdef ERROR_STATISTIC
        int             err_crc;
        int             err_tx;
index 097f24d8bceb9ec2ad0d1440430c302d7f0a4a35..b9fda7018cef9a4b07cdeec3c71c3e29c5f63775 100644 (file)
@@ -78,7 +78,7 @@ struct sih {
        u8      irq_lines;              /* number of supported irq lines */
 
        /* SIR ignored -- set interrupt, for testing only */
-       struct irq_data {
+       struct sih_irq_data {
                u8      isr_offset;
                u8      imr_offset;
        } mask[2];
@@ -810,7 +810,7 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
        twl4030_irq_chip = dummy_irq_chip;
        twl4030_irq_chip.name = "twl4030";
 
-       twl4030_sih_irq_chip.ack = dummy_irq_chip.ack;
+       twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
 
        for (i = irq_base; i < irq_end; i++) {
                set_irq_chip_and_handler(i, &twl4030_irq_chip,
index c03243ad84b46b220324d499b18e0805e9fb827b..ef878823ee3bfbe22555f6ea23454f764cc53794 100644 (file)
@@ -72,6 +72,10 @@ typedef      void (*irq_flow_handler_t)(unsigned int irq,
 #define IRQ_ONESHOT            0x08000000      /* IRQ is not unmasked after hardirq */
 #define IRQ_NESTED_THREAD      0x10000000      /* IRQ is nested into another, no own handler thread */
 
+#define IRQF_MODIFY_MASK       \
+       (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
+        IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL)
+
 #ifdef CONFIG_IRQ_PER_CPU
 # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
 # define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
@@ -80,36 +84,82 @@ typedef     void (*irq_flow_handler_t)(unsigned int irq,
 # define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING
 #endif
 
-struct proc_dir_entry;
 struct msi_desc;
+struct irq_2_iommu;
+
+/**
+ * struct irq_data - per irq and irq chip data passed down to chip functions
+ * @irq:               interrupt number
+ * @node:              node index useful for balancing
+ * @chip:              low level interrupt hardware access
+ * @handler_data:      per-IRQ data for the irq_chip methods
+ * @chip_data:         platform-specific per-chip private data for the chip
+ *                     methods, to allow shared chip implementations
+ * @msi_desc:          MSI descriptor
+ * @affinity:          IRQ affinity on SMP
+ * @irq_2_iommu:       iommu with this irq
+ *
+ * The fields here need to overlay the ones in irq_desc until we
+ * cleaned up the direct references and switched everything over to
+ * irq_data.
+ */
+struct irq_data {
+       unsigned int            irq;
+       unsigned int            node;
+       struct irq_chip         *chip;
+       void                    *handler_data;
+       void                    *chip_data;
+       struct msi_desc         *msi_desc;
+#ifdef CONFIG_SMP
+       cpumask_var_t           affinity;
+#endif
+#ifdef CONFIG_INTR_REMAP
+       struct irq_2_iommu      *irq_2_iommu;
+#endif
+};
 
 /**
  * struct irq_chip - hardware interrupt chip descriptor
  *
  * @name:              name for /proc/interrupts
- * @startup:           start up the interrupt (defaults to ->enable if NULL)
- * @shutdown:          shut down the interrupt (defaults to ->disable if NULL)
- * @enable:            enable the interrupt (defaults to chip->unmask if NULL)
- * @disable:           disable the interrupt
- * @ack:               start of a new interrupt
- * @mask:              mask an interrupt source
- * @mask_ack:          ack and mask an interrupt source
- * @unmask:            unmask an interrupt source
- * @eoi:               end of interrupt - chip level
- * @end:               end of interrupt - flow level
- * @set_affinity:      set the CPU affinity on SMP machines
- * @retrigger:         resend an IRQ to the CPU
- * @set_type:          set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
- * @set_wake:          enable/disable power-management wake-on of an IRQ
+ * @startup:           deprecated, replaced by irq_startup
+ * @shutdown:          deprecated, replaced by irq_shutdown
+ * @enable:            deprecated, replaced by irq_enable
+ * @disable:           deprecated, replaced by irq_disable
+ * @ack:               deprecated, replaced by irq_ack
+ * @mask:              deprecated, replaced by irq_mask
+ * @mask_ack:          deprecated, replaced by irq_mask_ack
+ * @unmask:            deprecated, replaced by irq_unmask
+ * @eoi:               deprecated, replaced by irq_eoi
+ * @end:               deprecated, will go away with __do_IRQ()
+ * @set_affinity:      deprecated, replaced by irq_set_affinity
+ * @retrigger:         deprecated, replaced by irq_retrigger
+ * @set_type:          deprecated, replaced by irq_set_type
+ * @set_wake:          deprecated, replaced by irq_set_wake
+ * @bus_lock:          deprecated, replaced by irq_bus_lock
+ * @bus_sync_unlock:   deprecated, replaced by irq_bus_sync_unlock
  *
- * @bus_lock:          function to lock access to slow bus (i2c) chips
- * @bus_sync_unlock:   function to sync and unlock slow bus (i2c) chips
+ * @irq_startup:       start up the interrupt (defaults to ->enable if NULL)
+ * @irq_shutdown:      shut down the interrupt (defaults to ->disable if NULL)
+ * @irq_enable:                enable the interrupt (defaults to chip->unmask if NULL)
+ * @irq_disable:       disable the interrupt
+ * @irq_ack:           start of a new interrupt
+ * @irq_mask:          mask an interrupt source
+ * @irq_mask_ack:      ack and mask an interrupt source
+ * @irq_unmask:                unmask an interrupt source
+ * @irq_eoi:           end of interrupt
+ * @irq_set_affinity:  set the CPU affinity on SMP machines
+ * @irq_retrigger:     resend an IRQ to the CPU
+ * @irq_set_type:      set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
+ * @irq_set_wake:      enable/disable power-management wake-on of an IRQ
+ * @irq_bus_lock:      function to lock access to slow bus (i2c) chips
+ * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
  *
  * @release:           release function solely used by UML
- * @typename:          obsoleted by name, kept as migration helper
  */
 struct irq_chip {
        const char      *name;
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
        unsigned int    (*startup)(unsigned int irq);
        void            (*shutdown)(unsigned int irq);
        void            (*enable)(unsigned int irq);
@@ -130,154 +180,66 @@ struct irq_chip {
 
        void            (*bus_lock)(unsigned int irq);
        void            (*bus_sync_unlock)(unsigned int irq);
+#endif
+       unsigned int    (*irq_startup)(struct irq_data *data);
+       void            (*irq_shutdown)(struct irq_data *data);
+       void            (*irq_enable)(struct irq_data *data);
+       void            (*irq_disable)(struct irq_data *data);
+
+       void            (*irq_ack)(struct irq_data *data);
+       void            (*irq_mask)(struct irq_data *data);
+       void            (*irq_mask_ack)(struct irq_data *data);
+       void            (*irq_unmask)(struct irq_data *data);
+       void            (*irq_eoi)(struct irq_data *data);
+
+       int             (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
+       int             (*irq_retrigger)(struct irq_data *data);
+       int             (*irq_set_type)(struct irq_data *data, unsigned int flow_type);
+       int             (*irq_set_wake)(struct irq_data *data, unsigned int on);
+
+       void            (*irq_bus_lock)(struct irq_data *data);
+       void            (*irq_bus_sync_unlock)(struct irq_data *data);
 
        /* Currently used only by UML, might disappear one day.*/
 #ifdef CONFIG_IRQ_RELEASE_METHOD
        void            (*release)(unsigned int irq, void *dev_id);
 #endif
-       /*
-        * For compatibility, ->typename is copied into ->name.
-        * Will disappear.
-        */
-       const char      *typename;
 };
 
-struct timer_rand_state;
-struct irq_2_iommu;
-/**
- * struct irq_desc - interrupt descriptor
- * @irq:               interrupt number for this descriptor
- * @timer_rand_state:  pointer to timer rand state struct
- * @kstat_irqs:                irq stats per cpu
- * @irq_2_iommu:       iommu with this irq
- * @handle_irq:                highlevel irq-events handler [if NULL, __do_IRQ()]
- * @chip:              low level interrupt hardware access
- * @msi_desc:          MSI descriptor
- * @handler_data:      per-IRQ data for the irq_chip methods
- * @chip_data:         platform-specific per-chip private data for the chip
- *                     methods, to allow shared chip implementations
- * @action:            the irq action chain
- * @status:            status information
- * @depth:             disable-depth, for nested irq_disable() calls
- * @wake_depth:                enable depth, for multiple set_irq_wake() callers
- * @irq_count:         stats field to detect stalled irqs
- * @last_unhandled:    aging timer for unhandled count
- * @irqs_unhandled:    stats field for spurious unhandled interrupts
- * @lock:              locking for SMP
- * @affinity:          IRQ affinity on SMP
- * @node:              node index useful for balancing
- * @pending_mask:      pending rebalanced interrupts
- * @threads_active:    number of irqaction threads currently running
- * @wait_for_threads:  wait queue for sync_irq to wait for threaded handlers
- * @dir:               /proc/irq/ procfs entry
- * @name:              flow handler name for /proc/interrupts output
- */
-struct irq_desc {
-       unsigned int            irq;
-       struct timer_rand_state *timer_rand_state;
-       unsigned int            *kstat_irqs;
-#ifdef CONFIG_INTR_REMAP
-       struct irq_2_iommu      *irq_2_iommu;
-#endif
-       irq_flow_handler_t      handle_irq;
-       struct irq_chip         *chip;
-       struct msi_desc         *msi_desc;
-       void                    *handler_data;
-       void                    *chip_data;
-       struct irqaction        *action;        /* IRQ action list */
-       unsigned int            status;         /* IRQ status */
-
-       unsigned int            depth;          /* nested irq disables */
-       unsigned int            wake_depth;     /* nested wake enables */
-       unsigned int            irq_count;      /* For detecting broken IRQs */
-       unsigned long           last_unhandled; /* Aging timer for unhandled count */
-       unsigned int            irqs_unhandled;
-       raw_spinlock_t          lock;
-#ifdef CONFIG_SMP
-       cpumask_var_t           affinity;
-       const struct cpumask    *affinity_hint;
-       unsigned int            node;
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-       cpumask_var_t           pending_mask;
-#endif
-#endif
-       atomic_t                threads_active;
-       wait_queue_head_t       wait_for_threads;
-#ifdef CONFIG_PROC_FS
-       struct proc_dir_entry   *dir;
-#endif
-       const char              *name;
-} ____cacheline_internodealigned_in_smp;
+/* This include will go away once we isolated irq_desc usage to core code */
+#include <linux/irqdesc.h>
 
-extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
-                                       struct irq_desc *desc, int node);
-extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
+/*
+ * Pick up the arch-dependent methods:
+ */
+#include <asm/hw_irq.h>
 
-#ifndef CONFIG_SPARSE_IRQ
-extern struct irq_desc irq_desc[NR_IRQS];
+#ifndef NR_IRQS_LEGACY
+# define NR_IRQS_LEGACY 0
 #endif
 
-#ifdef CONFIG_NUMA_IRQ_DESC
-extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node);
-#else
-static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
-{
-       return desc;
-}
+#ifndef ARCH_IRQ_INIT_FLAGS
+# define ARCH_IRQ_INIT_FLAGS   0
 #endif
 
-extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
-
-/*
- * Pick up the arch-dependent methods:
- */
-#include <asm/hw_irq.h>
+#define IRQ_DEFAULT_INIT_FLAGS (IRQ_DISABLED | ARCH_IRQ_INIT_FLAGS)
 
+struct irqaction;
 extern int setup_irq(unsigned int irq, struct irqaction *new);
 extern void remove_irq(unsigned int irq, struct irqaction *act);
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 
-#ifdef CONFIG_SMP
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
 void move_native_irq(int irq);
 void move_masked_irq(int irq);
-
-#else /* CONFIG_GENERIC_PENDING_IRQ */
-
-static inline void move_irq(int irq)
-{
-}
-
-static inline void move_native_irq(int irq)
-{
-}
-
-static inline void move_masked_irq(int irq)
-{
-}
-
-#endif /* CONFIG_GENERIC_PENDING_IRQ */
-
-#else /* CONFIG_SMP */
-
-#define move_native_irq(x)
-#define move_masked_irq(x)
-
-#endif /* CONFIG_SMP */
+#else
+static inline void move_native_irq(int irq) { }
+static inline void move_masked_irq(int irq) { }
+#endif
 
 extern int no_irq_affinity;
 
-static inline int irq_balancing_disabled(unsigned int irq)
-{
-       struct irq_desc *desc;
-
-       desc = irq_to_desc(irq);
-       return desc->status & IRQ_NO_BALANCING_MASK;
-}
-
 /* Handle irq action chains: */
 extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);
 
@@ -293,42 +255,10 @@ extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_nested_irq(unsigned int irq);
 
-/*
- * Monolithic do_IRQ implementation.
- */
-#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-extern unsigned int __do_IRQ(unsigned int irq);
-#endif
-
-/*
- * Architectures call this to let the generic IRQ layer
- * handle an interrupt. If the descriptor is attached to an
- * irqchip-style controller then we call the ->handle_irq() handler,
- * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
- */
-static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
-{
-#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-       desc->handle_irq(irq, desc);
-#else
-       if (likely(desc->handle_irq))
-               desc->handle_irq(irq, desc);
-       else
-               __do_IRQ(irq);
-#endif
-}
-
-static inline void generic_handle_irq(unsigned int irq)
-{
-       generic_handle_irq_desc(irq, irq_to_desc(irq));
-}
-
 /* Handling of unhandled and spurious interrupts: */
 extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
                           irqreturn_t action_ret);
 
-/* Resending of interrupts :*/
-void check_irq_resend(struct irq_desc *desc, unsigned int irq);
 
 /* Enable/disable irq debugging output: */
 extern int noirqdebug_setup(char *str);
@@ -351,16 +281,6 @@ extern void
 __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
                  const char *name);
 
-/* caller has locked the irq_desc and both params are valid */
-static inline void __set_irq_handler_unlocked(int irq,
-                                             irq_flow_handler_t handler)
-{
-       struct irq_desc *desc;
-
-       desc = irq_to_desc(irq);
-       desc->handle_irq = handler;
-}
-
 /*
  * Set a highlevel flow handler for a given IRQ:
  */
@@ -384,21 +304,33 @@ set_irq_chained_handler(unsigned int irq,
 
 extern void set_irq_nested_thread(unsigned int irq, int nest);
 
-extern void set_irq_noprobe(unsigned int irq);
-extern void set_irq_probe(unsigned int irq);
+void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
+
+static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
+{
+       irq_modify_status(irq, 0, set);
+}
+
+static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
+{
+       irq_modify_status(irq, clr, 0);
+}
+
+static inline void set_irq_noprobe(unsigned int irq)
+{
+       irq_modify_status(irq, 0, IRQ_NOPROBE);
+}
+
+static inline void set_irq_probe(unsigned int irq)
+{
+       irq_modify_status(irq, IRQ_NOPROBE, 0);
+}
 
 /* Handle dynamic irq creation and destruction */
 extern unsigned int create_irq_nr(unsigned int irq_want, int node);
 extern int create_irq(void);
 extern void destroy_irq(unsigned int irq);
 
-/* Test to see if a driver has successfully requested an irq */
-static inline int irq_has_action(unsigned int irq)
-{
-       struct irq_desc *desc = irq_to_desc(irq);
-       return desc->action != NULL;
-}
-
 /* Dynamic irq helper functions */
 extern void dynamic_irq_init(unsigned int irq);
 void dynamic_irq_init_keep_chip_data(unsigned int irq);
@@ -411,114 +343,91 @@ extern int set_irq_data(unsigned int irq, void *data);
 extern int set_irq_chip_data(unsigned int irq, void *data);
 extern int set_irq_type(unsigned int irq, unsigned int type);
 extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
+extern struct irq_data *irq_get_irq_data(unsigned int irq);
 
-#define get_irq_chip(irq)      (irq_to_desc(irq)->chip)
-#define get_irq_chip_data(irq) (irq_to_desc(irq)->chip_data)
-#define get_irq_data(irq)      (irq_to_desc(irq)->handler_data)
-#define get_irq_msi(irq)       (irq_to_desc(irq)->msi_desc)
-
-#define get_irq_desc_chip(desc)                ((desc)->chip)
-#define get_irq_desc_chip_data(desc)   ((desc)->chip_data)
-#define get_irq_desc_data(desc)                ((desc)->handler_data)
-#define get_irq_desc_msi(desc)         ((desc)->msi_desc)
-
-#endif /* CONFIG_GENERIC_HARDIRQS */
-
-#endif /* !CONFIG_S390 */
-
-#ifdef CONFIG_SMP
-/**
- * alloc_desc_masks - allocate cpumasks for irq_desc
- * @desc:      pointer to irq_desc struct
- * @node:      node which will be handling the cpumasks
- * @boot:      true if need bootmem
- *
- * Allocates affinity and pending_mask cpumask if required.
- * Returns true if successful (or not required).
- */
-static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
-                                                       bool boot)
+static inline struct irq_chip *get_irq_chip(unsigned int irq)
 {
-       gfp_t gfp = GFP_ATOMIC;
+       struct irq_data *d = irq_get_irq_data(irq);
+       return d ? d->chip : NULL;
+}
 
-       if (boot)
-               gfp = GFP_NOWAIT;
+static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
+{
+       return d->chip;
+}
 
-#ifdef CONFIG_CPUMASK_OFFSTACK
-       if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
-               return false;
+static inline void *get_irq_chip_data(unsigned int irq)
+{
+       struct irq_data *d = irq_get_irq_data(irq);
+       return d ? d->chip_data : NULL;
+}
 
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-       if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
-               free_cpumask_var(desc->affinity);
-               return false;
-       }
-#endif
-#endif
-       return true;
+static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
+{
+       return d->chip_data;
 }
 
-static inline void init_desc_masks(struct irq_desc *desc)
+static inline void *get_irq_data(unsigned int irq)
 {
-       cpumask_setall(desc->affinity);
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-       cpumask_clear(desc->pending_mask);
-#endif
+       struct irq_data *d = irq_get_irq_data(irq);
+       return d ? d->handler_data : NULL;
 }
 
-/**
- * init_copy_desc_masks - copy cpumasks for irq_desc
- * @old_desc:  pointer to old irq_desc struct
- * @new_desc:  pointer to new irq_desc struct
- *
- * Insures affinity and pending_masks are copied to new irq_desc.
- * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
- * irq_desc struct so the copy is redundant.
- */
+static inline void *irq_data_get_irq_data(struct irq_data *d)
+{
+       return d->handler_data;
+}
 
-static inline void init_copy_desc_masks(struct irq_desc *old_desc,
-                                       struct irq_desc *new_desc)
+static inline struct msi_desc *get_irq_msi(unsigned int irq)
 {
-#ifdef CONFIG_CPUMASK_OFFSTACK
-       cpumask_copy(new_desc->affinity, old_desc->affinity);
+       struct irq_data *d = irq_get_irq_data(irq);
+       return d ? d->msi_desc : NULL;
+}
 
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-       cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
-#endif
-#endif
+static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
+{
+       return d->msi_desc;
 }
 
-static inline void free_desc_masks(struct irq_desc *old_desc,
-                                  struct irq_desc *new_desc)
+#ifdef CONFIG_INTR_REMAP
+static inline struct irq_2_iommu *get_irq_iommu(unsigned int irq)
 {
-       free_cpumask_var(old_desc->affinity);
+       struct irq_data *d = irq_get_irq_data(irq);
+       return d ? d->irq_2_iommu : NULL;
+}
 
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-       free_cpumask_var(old_desc->pending_mask);
-#endif
+static inline struct irq_2_iommu *irq_data_get_iommu(struct irq_data *d)
+{
+       return d->irq_2_iommu;
 }
+#endif
 
-#else /* !CONFIG_SMP */
+int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node);
+void irq_free_descs(unsigned int irq, unsigned int cnt);
+int irq_reserve_irqs(unsigned int from, unsigned int cnt);
 
-static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
-                                                               bool boot)
+static inline int irq_alloc_desc(int node)
 {
-       return true;
+       return irq_alloc_descs(-1, 0, 1, node);
 }
 
-static inline void init_desc_masks(struct irq_desc *desc)
+static inline int irq_alloc_desc_at(unsigned int at, int node)
 {
+       return irq_alloc_descs(at, at, 1, node);
 }
 
-static inline void init_copy_desc_masks(struct irq_desc *old_desc,
-                                       struct irq_desc *new_desc)
+static inline int irq_alloc_desc_from(unsigned int from, int node)
 {
+       return irq_alloc_descs(-1, from, 1, node);
 }
 
-static inline void free_desc_masks(struct irq_desc *old_desc,
-                                  struct irq_desc *new_desc)
+static inline void irq_free_desc(unsigned int irq)
 {
+       irq_free_descs(irq, 1);
 }
-#endif /* CONFIG_SMP */
+
+#endif /* CONFIG_GENERIC_HARDIRQS */
+
+#endif /* !CONFIG_S390 */
 
 #endif /* _LINUX_IRQ_H */
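
The helpers above replace the exported set_irq_noprobe()/set_irq_probe() pair with inline wrappers around irq_modify_status(), and add a descriptor allocation API. A minimal usage sketch, assuming hypothetical board code (the IRQ flag combination, block size and helper names are illustrative, not taken from this patch):

    /* Hypothetical board setup using the new status helpers. */
    static void board_mark_special_irq(unsigned int irq)
    {
            /* Keep autoprobing and request_irq() away from this line. */
            irq_set_status_flags(irq, IRQ_NOPROBE | IRQ_NOREQUEST);

            /* Later, allow requests again while staying non-probeable. */
            irq_clear_status_flags(irq, IRQ_NOREQUEST);
    }

    /* Allocating and freeing a block of eight descriptors on a node. */
    static int board_alloc_irq_block(int node)
    {
            int base = irq_alloc_descs(-1, 0, 8, node);

            if (base < 0)
                    return base;
            /* ... use irqs base..base+7, then on teardown: */
            irq_free_descs(base, 8);
            return base;
    }
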
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
new file mode 100644 (file)
index 0000000..22e426f
--- /dev/null
@@ -0,0 +1,171 @@
+#ifndef _LINUX_IRQDESC_H
+#define _LINUX_IRQDESC_H
+
+/*
+ * Core internal functions to deal with irq descriptors
+ *
+ * This include will move to kernel/irq once we have cleaned up the tree.
+ * For now it's included from <linux/irq.h>
+ */
+
+struct proc_dir_entry;
+struct timer_rand_state;
+struct irq_2_iommu;
+/**
+ * struct irq_desc - interrupt descriptor
+ * @irq_data:          per irq and chip data passed down to chip functions
+ * @timer_rand_state:  pointer to timer rand state struct
+ * @kstat_irqs:                irq stats per cpu
+ * @handle_irq:                highlevel irq-events handler [if NULL, __do_IRQ()]
+ * @action:            the irq action chain
+ * @status:            status information
+ * @depth:             disable-depth, for nested irq_disable() calls
+ * @wake_depth:                enable depth, for multiple set_irq_wake() callers
+ * @irq_count:         stats field to detect stalled irqs
+ * @last_unhandled:    aging timer for unhandled count
+ * @irqs_unhandled:    stats field for spurious unhandled interrupts
+ * @lock:              locking for SMP
+ * @pending_mask:      pending rebalanced interrupts
+ * @threads_active:    number of irqaction threads currently running
+ * @wait_for_threads:  wait queue for sync_irq to wait for threaded handlers
+ * @dir:               /proc/irq/ procfs entry
+ * @name:              flow handler name for /proc/interrupts output
+ */
+struct irq_desc {
+
+#ifdef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
+       struct irq_data         irq_data;
+#else
+       /*
+        * This union will go away once we have fixed the direct access to
+        * irq_desc all over the place. The direct fields are a 1:1
+        * overlay of irq_data.
+        */
+       union {
+               struct irq_data         irq_data;
+               struct {
+                       unsigned int            irq;
+                       unsigned int            node;
+                       struct irq_chip         *chip;
+                       void                    *handler_data;
+                       void                    *chip_data;
+                       struct msi_desc         *msi_desc;
+#ifdef CONFIG_SMP
+                       cpumask_var_t           affinity;
+#endif
+#ifdef CONFIG_INTR_REMAP
+                       struct irq_2_iommu      *irq_2_iommu;
+#endif
+               };
+       };
+#endif
+
+       struct timer_rand_state *timer_rand_state;
+       unsigned int            *kstat_irqs;
+       irq_flow_handler_t      handle_irq;
+       struct irqaction        *action;        /* IRQ action list */
+       unsigned int            status;         /* IRQ status */
+
+       unsigned int            depth;          /* nested irq disables */
+       unsigned int            wake_depth;     /* nested wake enables */
+       unsigned int            irq_count;      /* For detecting broken IRQs */
+       unsigned long           last_unhandled; /* Aging timer for unhandled count */
+       unsigned int            irqs_unhandled;
+       raw_spinlock_t          lock;
+#ifdef CONFIG_SMP
+       const struct cpumask    *affinity_hint;
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+       cpumask_var_t           pending_mask;
+#endif
+#endif
+       atomic_t                threads_active;
+       wait_queue_head_t       wait_for_threads;
+#ifdef CONFIG_PROC_FS
+       struct proc_dir_entry   *dir;
+#endif
+       const char              *name;
+} ____cacheline_internodealigned_in_smp;
+
+extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
+                                       struct irq_desc *desc, int node);
+extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
+
+#ifndef CONFIG_SPARSE_IRQ
+extern struct irq_desc irq_desc[NR_IRQS];
+#endif
+
+#ifdef CONFIG_NUMA_IRQ_DESC
+extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node);
+#else
+static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
+{
+       return desc;
+}
+#endif
+
+extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+
+#define get_irq_desc_chip(desc)                ((desc)->irq_data.chip)
+#define get_irq_desc_chip_data(desc)   ((desc)->irq_data.chip_data)
+#define get_irq_desc_data(desc)                ((desc)->irq_data.handler_data)
+#define get_irq_desc_msi(desc)         ((desc)->irq_data.msi_desc)
+
+/*
+ * Monolithic do_IRQ implementation.
+ */
+#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
+extern unsigned int __do_IRQ(unsigned int irq);
+#endif
+
+/*
+ * Architectures call this to let the generic IRQ layer
+ * handle an interrupt. If the descriptor is attached to an
+ * irqchip-style controller then we call the ->handle_irq() handler,
+ * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
+ */
+static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
+       desc->handle_irq(irq, desc);
+#else
+       if (likely(desc->handle_irq))
+               desc->handle_irq(irq, desc);
+       else
+               __do_IRQ(irq);
+#endif
+}
+
+static inline void generic_handle_irq(unsigned int irq)
+{
+       generic_handle_irq_desc(irq, irq_to_desc(irq));
+}
+
+/* Test to see if a driver has successfully requested an irq */
+static inline int irq_has_action(unsigned int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       return desc->action != NULL;
+}
+
+static inline int irq_balancing_disabled(unsigned int irq)
+{
+       struct irq_desc *desc;
+
+       desc = irq_to_desc(irq);
+       return desc->status & IRQ_NO_BALANCING_MASK;
+}
+
+/* caller has locked the irq_desc and both params are valid */
+static inline void __set_irq_handler_unlocked(int irq,
+                                             irq_flow_handler_t handler)
+{
+       struct irq_desc *desc;
+
+       desc = irq_to_desc(irq);
+       desc->handle_irq = handler;
+}
+#endif
+
+#endif
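
With the descriptor definition and generic_handle_irq()/generic_handle_irq_desc() now living in <linux/irqdesc.h>, an architecture that has switched to flow handlers only needs to decode the hardware vector and hand the number to the core. A sketch of such an entry point on a hypothetical converted architecture (the function name and surrounding glue are invented):

    /* Hypothetical low-level entry point on a converted architecture. */
    asmlinkage void do_hw_irq(unsigned int irq, struct pt_regs *regs)
    {
            struct pt_regs *old_regs = set_irq_regs(regs);

            irq_enter();
            generic_handle_irq(irq);        /* dispatches to desc->handle_irq() */
            irq_exit();

            set_irq_regs(old_regs);
    }
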
index 7bf89bc8cbca32762d082664c1839d8d642f8782..05aa8c23483ff502bdf94e76365236fa44ff9721 100644 (file)
@@ -25,6 +25,7 @@
 
 extern int nr_irqs;
 extern struct irq_desc *irq_to_desc(unsigned int irq);
+unsigned int irq_get_next_irq(unsigned int offset);
 
 # define for_each_irq_desc(irq, desc)                                  \
        for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs;           \
@@ -47,6 +48,10 @@ extern struct irq_desc *irq_to_desc(unsigned int irq);
 #define irq_node(irq)  0
 #endif
 
+# define for_each_active_irq(irq)                      \
+       for (irq = irq_get_next_irq(0); irq < nr_irqs;  \
+            irq = irq_get_next_irq(irq + 1))
+
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
 #define for_each_irq_nr(irq)                   \
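
Unlike for_each_irq_nr(), which visits every possible interrupt number, the new for_each_active_irq() iterator uses irq_get_next_irq() to skip interrupt numbers that have no allocated descriptor. A small illustrative walk (the debug printout is made up):

    /* Illustrative: visit only interrupts that have a descriptor. */
    static void show_allocated_irqs(void)
    {
            unsigned int irq;

            for_each_active_irq(irq)
                    printk(KERN_DEBUG "irq %u is allocated\n", irq);
    }
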
index 06aed8305bf3bc38061dc42c168c3b9d83f29492..17d050ce7ab8d3c5ca8812b11e2371aeed93121e 100644 (file)
@@ -424,14 +424,6 @@ do {                                                               \
 
 #endif /* CONFIG_LOCKDEP */
 
-#ifdef CONFIG_GENERIC_HARDIRQS
-extern void early_init_irq_lock_class(void);
-#else
-static inline void early_init_irq_lock_class(void)
-{
-}
-#endif
-
 #ifdef CONFIG_TRACE_IRQFLAGS
 extern void early_boot_irqs_off(void);
 extern void early_boot_irqs_on(void);
index 2de5b1cbadd9e47138f879d23cc4d2d5066d32d7..1df1a87cc595938210fb317d4bf9d9640af46db7 100644 (file)
@@ -332,6 +332,8 @@ config AUDIT_TREE
        depends on AUDITSYSCALL
        select FSNOTIFY
 
+source "kernel/irq/Kconfig"
+
 menu "RCU Subsystem"
 
 choice
index 94ab488039aab1dde97d442f785046475f09c978..9684c9670b48ef2157919ea97bea37c81e32f2f8 100644 (file)
@@ -556,7 +556,6 @@ asmlinkage void __init start_kernel(void)
 
        local_irq_disable();
        early_boot_irqs_off();
-       early_init_irq_lock_class();
 
 /*
  * Interrupts are still disabled. Do necessary setups, then
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
new file mode 100644 (file)
index 0000000..a42c019
--- /dev/null
@@ -0,0 +1,58 @@
+config HAVE_GENERIC_HARDIRQS
+       def_bool n
+
+if HAVE_GENERIC_HARDIRQS
+menu "IRQ subsystem"
+#
+# Interrupt subsystem related configuration options
+#
+config GENERIC_HARDIRQS
+       def_bool y
+
+config GENERIC_HARDIRQS_NO__DO_IRQ
+       def_bool y
+
+# Select this to disable the deprecated stuff
+config GENERIC_HARDIRQS_NO_DEPRECATED
+       def_bool n
+
+# Options selectable by the architecture code
+config HAVE_SPARSE_IRQ
+       def_bool n
+
+config GENERIC_IRQ_PROBE
+       def_bool n
+
+config GENERIC_PENDING_IRQ
+       def_bool n
+
+if SPARSE_IRQ && NUMA
+config NUMA_IRQ_DESC
+       def_bool n
+endif
+
+config AUTO_IRQ_AFFINITY
+       def_bool n
+
+config IRQ_PER_CPU
+       def_bool n
+
+config HARDIRQS_SW_RESEND
+       def_bool n
+
+config SPARSE_IRQ
+       bool "Support sparse irq numbering"
+       depends on HAVE_SPARSE_IRQ
+       ---help---
+
+         Sparse irq numbering is useful for distro kernels that want
+         to define a high CONFIG_NR_CPUS value but still want to have
+         low kernel memory footprint on smaller machines.
+
+         ( Sparse irqs can also be beneficial on NUMA boxes, as they spread
+           out the interrupt descriptors in a more NUMA-friendly way. )
+
+         If you don't know what to do here, say N.
+
+endmenu
+endif
index 7d047808419da88e273fba8cd1efd88d9aaa5bcd..1eaab0da56dbc692239deac0bf0f3118bfef0ded 100644 (file)
@@ -1,5 +1,5 @@
 
-obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o
+obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o
 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
index 2295a31ef110dab62a6e665358a9cb290fbe990e..505798f86c36d774afaa4938f2097ac32cd21e0a 100644 (file)
@@ -57,9 +57,10 @@ unsigned long probe_irq_on(void)
                         * Some chips need to know about probing in
                         * progress:
                         */
-                       if (desc->chip->set_type)
-                               desc->chip->set_type(i, IRQ_TYPE_PROBE);
-                       desc->chip->startup(i);
+                       if (desc->irq_data.chip->irq_set_type)
+                               desc->irq_data.chip->irq_set_type(&desc->irq_data,
+                                                        IRQ_TYPE_PROBE);
+                       desc->irq_data.chip->irq_startup(&desc->irq_data);
                }
                raw_spin_unlock_irq(&desc->lock);
        }
@@ -76,7 +77,7 @@ unsigned long probe_irq_on(void)
                raw_spin_lock_irq(&desc->lock);
                if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
                        desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
-                       if (desc->chip->startup(i))
+                       if (desc->irq_data.chip->irq_startup(&desc->irq_data))
                                desc->status |= IRQ_PENDING;
                }
                raw_spin_unlock_irq(&desc->lock);
@@ -98,7 +99,7 @@ unsigned long probe_irq_on(void)
                        /* It triggered already - consider it spurious. */
                        if (!(status & IRQ_WAITING)) {
                                desc->status = status & ~IRQ_AUTODETECT;
-                               desc->chip->shutdown(i);
+                               desc->irq_data.chip->irq_shutdown(&desc->irq_data);
                        } else
                                if (i < 32)
                                        mask |= 1 << i;
@@ -137,7 +138,7 @@ unsigned int probe_irq_mask(unsigned long val)
                                mask |= 1 << i;
 
                        desc->status = status & ~IRQ_AUTODETECT;
-                       desc->chip->shutdown(i);
+                       desc->irq_data.chip->irq_shutdown(&desc->irq_data);
                }
                raw_spin_unlock_irq(&desc->lock);
        }
@@ -181,7 +182,7 @@ int probe_irq_off(unsigned long val)
                                nr_of_irqs++;
                        }
                        desc->status = status & ~IRQ_AUTODETECT;
-                       desc->chip->shutdown(i);
+                       desc->irq_data.chip->irq_shutdown(&desc->irq_data);
                }
                raw_spin_unlock_irq(&desc->lock);
        }
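
The autoprobe conversion only changes how the core reaches the chip callbacks; the driver-facing probing API is untouched. As a reminder, the classic calling sequence looks roughly like this (the device trigger function and delay are hypothetical):

    /* Classic probe sequence from a driver's point of view (illustrative). */
    static int mydev_find_irq(struct mydev *dev)
    {
            unsigned long mask;
            int irq;

            mask = probe_irq_on();          /* arm all probeable irq lines */
            mydev_force_interrupt(dev);     /* hypothetical: make the card raise its irq */
            mdelay(10);
            irq = probe_irq_off(mask);      /* 0: none fired, <0: several, >0: the winner */

            return irq > 0 ? irq : -ENODEV;
    }
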
index b7091d5ca2f829ae61f0140ae6b1901213bfab62..3405761d6224a59f9a98951fb7d831379c39f658 100644 (file)
@@ -31,19 +31,19 @@ static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
 
        /* Ensure we don't have left over values from a previous use of this irq */
        raw_spin_lock_irqsave(&desc->lock, flags);
-       desc->status = IRQ_DISABLED;
-       desc->chip = &no_irq_chip;
+       desc->status = IRQ_DEFAULT_INIT_FLAGS;
+       desc->irq_data.chip = &no_irq_chip;
        desc->handle_irq = handle_bad_irq;
        desc->depth = 1;
-       desc->msi_desc = NULL;
-       desc->handler_data = NULL;
+       desc->irq_data.msi_desc = NULL;
+       desc->irq_data.handler_data = NULL;
        if (!keep_chip_data)
-               desc->chip_data = NULL;
+               desc->irq_data.chip_data = NULL;
        desc->action = NULL;
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
 #ifdef CONFIG_SMP
-       cpumask_setall(desc->affinity);
+       cpumask_setall(desc->irq_data.affinity);
 #ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_clear(desc->pending_mask);
 #endif
@@ -64,7 +64,7 @@ void dynamic_irq_init(unsigned int irq)
  *     dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq
  *     @irq:   irq number to initialize
  *
- *     does not set irq_to_desc(irq)->chip_data to NULL
+ *     does not set irq_to_desc(irq)->irq_data.chip_data to NULL
  */
 void dynamic_irq_init_keep_chip_data(unsigned int irq)
 {
@@ -88,12 +88,12 @@ static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
                        irq);
                return;
        }
-       desc->msi_desc = NULL;
-       desc->handler_data = NULL;
+       desc->irq_data.msi_desc = NULL;
+       desc->irq_data.handler_data = NULL;
        if (!keep_chip_data)
-               desc->chip_data = NULL;
+               desc->irq_data.chip_data = NULL;
        desc->handle_irq = handle_bad_irq;
-       desc->chip = &no_irq_chip;
+       desc->irq_data.chip = &no_irq_chip;
        desc->name = NULL;
        clear_kstat_irqs(desc);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -112,7 +112,7 @@ void dynamic_irq_cleanup(unsigned int irq)
  *     dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq
  *     @irq:   irq number to initialize
  *
- *     does not set irq_to_desc(irq)->chip_data to NULL
+ *     does not set irq_to_desc(irq)->irq_data.chip_data to NULL
  */
 void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
 {
@@ -140,7 +140,7 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
 
        raw_spin_lock_irqsave(&desc->lock, flags);
        irq_chip_set_defaults(chip);
-       desc->chip = chip;
+       desc->irq_data.chip = chip;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
 
        return 0;
@@ -193,7 +193,7 @@ int set_irq_data(unsigned int irq, void *data)
        }
 
        raw_spin_lock_irqsave(&desc->lock, flags);
-       desc->handler_data = data;
+       desc->irq_data.handler_data = data;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
 }
@@ -218,7 +218,7 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry)
        }
 
        raw_spin_lock_irqsave(&desc->lock, flags);
-       desc->msi_desc = entry;
+       desc->irq_data.msi_desc = entry;
        if (entry)
                entry->irq = irq;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -243,19 +243,27 @@ int set_irq_chip_data(unsigned int irq, void *data)
                return -EINVAL;
        }
 
-       if (!desc->chip) {
+       if (!desc->irq_data.chip) {
                printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
                return -EINVAL;
        }
 
        raw_spin_lock_irqsave(&desc->lock, flags);
-       desc->chip_data = data;
+       desc->irq_data.chip_data = data;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
 
        return 0;
 }
 EXPORT_SYMBOL(set_irq_chip_data);
 
+struct irq_data *irq_get_irq_data(unsigned int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       return desc ? &desc->irq_data : NULL;
+}
+EXPORT_SYMBOL_GPL(irq_get_irq_data);
+
 /**
  *     set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq
  *
@@ -287,93 +295,216 @@ EXPORT_SYMBOL_GPL(set_irq_nested_thread);
 /*
  * default enable function
  */
-static void default_enable(unsigned int irq)
+static void default_enable(struct irq_data *data)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
+       struct irq_desc *desc = irq_data_to_desc(data);
 
-       desc->chip->unmask(irq);
+       desc->irq_data.chip->irq_unmask(&desc->irq_data);
        desc->status &= ~IRQ_MASKED;
 }
 
 /*
  * default disable function
  */
-static void default_disable(unsigned int irq)
+static void default_disable(struct irq_data *data)
 {
 }
 
 /*
  * default startup function
  */
-static unsigned int default_startup(unsigned int irq)
+static unsigned int default_startup(struct irq_data *data)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
+       struct irq_desc *desc = irq_data_to_desc(data);
 
-       desc->chip->enable(irq);
+       desc->irq_data.chip->irq_enable(data);
        return 0;
 }
 
 /*
  * default shutdown function
  */
-static void default_shutdown(unsigned int irq)
+static void default_shutdown(struct irq_data *data)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
+       struct irq_desc *desc = irq_data_to_desc(data);
 
-       desc->chip->mask(irq);
+       desc->irq_data.chip->irq_mask(&desc->irq_data);
        desc->status |= IRQ_MASKED;
 }
 
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
+/* Temporary migration helpers */
+static void compat_irq_mask(struct irq_data *data)
+{
+       data->chip->mask(data->irq);
+}
+
+static void compat_irq_unmask(struct irq_data *data)
+{
+       data->chip->unmask(data->irq);
+}
+
+static void compat_irq_ack(struct irq_data *data)
+{
+       data->chip->ack(data->irq);
+}
+
+static void compat_irq_mask_ack(struct irq_data *data)
+{
+       data->chip->mask_ack(data->irq);
+}
+
+static void compat_irq_eoi(struct irq_data *data)
+{
+       data->chip->eoi(data->irq);
+}
+
+static void compat_irq_enable(struct irq_data *data)
+{
+       data->chip->enable(data->irq);
+}
+
+static void compat_irq_disable(struct irq_data *data)
+{
+       data->chip->disable(data->irq);
+}
+
+static void compat_irq_shutdown(struct irq_data *data)
+{
+       data->chip->shutdown(data->irq);
+}
+
+static unsigned int compat_irq_startup(struct irq_data *data)
+{
+       return data->chip->startup(data->irq);
+}
+
+static int compat_irq_set_affinity(struct irq_data *data,
+                                  const struct cpumask *dest, bool force)
+{
+       return data->chip->set_affinity(data->irq, dest);
+}
+
+static int compat_irq_set_type(struct irq_data *data, unsigned int type)
+{
+       return data->chip->set_type(data->irq, type);
+}
+
+static int compat_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+       return data->chip->set_wake(data->irq, on);
+}
+
+static int compat_irq_retrigger(struct irq_data *data)
+{
+       return data->chip->retrigger(data->irq);
+}
+
+static void compat_bus_lock(struct irq_data *data)
+{
+       data->chip->bus_lock(data->irq);
+}
+
+static void compat_bus_sync_unlock(struct irq_data *data)
+{
+       data->chip->bus_sync_unlock(data->irq);
+}
+#endif
+
 /*
  * Fixup enable/disable function pointers
  */
 void irq_chip_set_defaults(struct irq_chip *chip)
 {
-       if (!chip->enable)
-               chip->enable = default_enable;
-       if (!chip->disable)
-               chip->disable = default_disable;
-       if (!chip->startup)
-               chip->startup = default_startup;
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
+       /*
+        * Compat fixup functions need to be before we set the
+        * defaults for enable/disable/startup/shutdown
+        */
+       if (chip->enable)
+               chip->irq_enable = compat_irq_enable;
+       if (chip->disable)
+               chip->irq_disable = compat_irq_disable;
+       if (chip->shutdown)
+               chip->irq_shutdown = compat_irq_shutdown;
+       if (chip->startup)
+               chip->irq_startup = compat_irq_startup;
+#endif
+       /*
+        * The real defaults
+        */
+       if (!chip->irq_enable)
+               chip->irq_enable = default_enable;
+       if (!chip->irq_disable)
+               chip->irq_disable = default_disable;
+       if (!chip->irq_startup)
+               chip->irq_startup = default_startup;
        /*
-        * We use chip->disable, when the user provided its own. When
-        * we have default_disable set for chip->disable, then we need
+        * We use chip->irq_disable, when the user provided its own. When
+        * we have default_disable set for chip->irq_disable, then we need
         * to use default_shutdown, otherwise the irq line is not
         * disabled on free_irq():
         */
-       if (!chip->shutdown)
-               chip->shutdown = chip->disable != default_disable ?
-                       chip->disable : default_shutdown;
-       if (!chip->name)
-               chip->name = chip->typename;
+       if (!chip->irq_shutdown)
+               chip->irq_shutdown = chip->irq_disable != default_disable ?
+                       chip->irq_disable : default_shutdown;
+
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
        if (!chip->end)
                chip->end = dummy_irq_chip.end;
+
+       /*
+        * Now fix up the remaining compat handlers
+        */
+       if (chip->bus_lock)
+               chip->irq_bus_lock = compat_bus_lock;
+       if (chip->bus_sync_unlock)
+               chip->irq_bus_sync_unlock = compat_bus_sync_unlock;
+       if (chip->mask)
+               chip->irq_mask = compat_irq_mask;
+       if (chip->unmask)
+               chip->irq_unmask = compat_irq_unmask;
+       if (chip->ack)
+               chip->irq_ack = compat_irq_ack;
+       if (chip->mask_ack)
+               chip->irq_mask_ack = compat_irq_mask_ack;
+       if (chip->eoi)
+               chip->irq_eoi = compat_irq_eoi;
+       if (chip->set_affinity)
+               chip->irq_set_affinity = compat_irq_set_affinity;
+       if (chip->set_type)
+               chip->irq_set_type = compat_irq_set_type;
+       if (chip->set_wake)
+               chip->irq_set_wake = compat_irq_set_wake;
+       if (chip->retrigger)
+               chip->irq_retrigger = compat_irq_retrigger;
+#endif
 }
 
-static inline void mask_ack_irq(struct irq_desc *desc, int irq)
+static inline void mask_ack_irq(struct irq_desc *desc)
 {
-       if (desc->chip->mask_ack)
-               desc->chip->mask_ack(irq);
+       if (desc->irq_data.chip->irq_mask_ack)
+               desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
        else {
-               desc->chip->mask(irq);
-               if (desc->chip->ack)
-                       desc->chip->ack(irq);
+               desc->irq_data.chip->irq_mask(&desc->irq_data);
+               if (desc->irq_data.chip->irq_ack)
+                       desc->irq_data.chip->irq_ack(&desc->irq_data);
        }
        desc->status |= IRQ_MASKED;
 }
 
-static inline void mask_irq(struct irq_desc *desc, int irq)
+static inline void mask_irq(struct irq_desc *desc)
 {
-       if (desc->chip->mask) {
-               desc->chip->mask(irq);
+       if (desc->irq_data.chip->irq_mask) {
+               desc->irq_data.chip->irq_mask(&desc->irq_data);
                desc->status |= IRQ_MASKED;
        }
 }
 
-static inline void unmask_irq(struct irq_desc *desc, int irq)
+static inline void unmask_irq(struct irq_desc *desc)
 {
-       if (desc->chip->unmask) {
-               desc->chip->unmask(irq);
+       if (desc->irq_data.chip->irq_unmask) {
+               desc->irq_data.chip->irq_unmask(&desc->irq_data);
                desc->status &= ~IRQ_MASKED;
        }
 }
@@ -476,7 +607,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
        irqreturn_t action_ret;
 
        raw_spin_lock(&desc->lock);
-       mask_ack_irq(desc, irq);
+       mask_ack_irq(desc);
 
        if (unlikely(desc->status & IRQ_INPROGRESS))
                goto out_unlock;
@@ -502,7 +633,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
        desc->status &= ~IRQ_INPROGRESS;
 
        if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT)))
-               unmask_irq(desc, irq);
+               unmask_irq(desc);
 out_unlock:
        raw_spin_unlock(&desc->lock);
 }
@@ -539,7 +670,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
        action = desc->action;
        if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
                desc->status |= IRQ_PENDING;
-               mask_irq(desc, irq);
+               mask_irq(desc);
                goto out;
        }
 
@@ -554,7 +685,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
        raw_spin_lock(&desc->lock);
        desc->status &= ~IRQ_INPROGRESS;
 out:
-       desc->chip->eoi(irq);
+       desc->irq_data.chip->irq_eoi(&desc->irq_data);
 
        raw_spin_unlock(&desc->lock);
 }
@@ -590,14 +721,13 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
        if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
                    !desc->action)) {
                desc->status |= (IRQ_PENDING | IRQ_MASKED);
-               mask_ack_irq(desc, irq);
+               mask_ack_irq(desc);
                goto out_unlock;
        }
        kstat_incr_irqs_this_cpu(irq, desc);
 
        /* Start handling the irq */
-       if (desc->chip->ack)
-               desc->chip->ack(irq);
+       desc->irq_data.chip->irq_ack(&desc->irq_data);
 
        /* Mark the IRQ currently in progress.*/
        desc->status |= IRQ_INPROGRESS;
@@ -607,7 +737,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
                irqreturn_t action_ret;
 
                if (unlikely(!action)) {
-                       mask_irq(desc, irq);
+                       mask_irq(desc);
                        goto out_unlock;
                }
 
@@ -619,7 +749,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
                if (unlikely((desc->status &
                               (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
                              (IRQ_PENDING | IRQ_MASKED))) {
-                       unmask_irq(desc, irq);
+                       unmask_irq(desc);
                }
 
                desc->status &= ~IRQ_PENDING;
@@ -650,15 +780,15 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 
        kstat_incr_irqs_this_cpu(irq, desc);
 
-       if (desc->chip->ack)
-               desc->chip->ack(irq);
+       if (desc->irq_data.chip->irq_ack)
+               desc->irq_data.chip->irq_ack(&desc->irq_data);
 
        action_ret = handle_IRQ_event(irq, desc->action);
        if (!noirqdebug)
                note_interrupt(irq, desc, action_ret);
 
-       if (desc->chip->eoi)
-               desc->chip->eoi(irq);
+       if (desc->irq_data.chip->irq_eoi)
+               desc->irq_data.chip->irq_eoi(&desc->irq_data);
 }
 
 void
@@ -676,7 +806,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 
        if (!handle)
                handle = handle_bad_irq;
-       else if (desc->chip == &no_irq_chip) {
+       else if (desc->irq_data.chip == &no_irq_chip) {
                printk(KERN_WARNING "Trying to install %sinterrupt handler "
                       "for IRQ%d\n", is_chained ? "chained " : "", irq);
                /*
@@ -686,16 +816,16 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
                 * prevent us to setup the interrupt at all. Switch it to
                 * dummy_irq_chip for easy transition.
                 */
-               desc->chip = &dummy_irq_chip;
+               desc->irq_data.chip = &dummy_irq_chip;
        }
 
-       chip_bus_lock(irq, desc);
+       chip_bus_lock(desc);
        raw_spin_lock_irqsave(&desc->lock, flags);
 
        /* Uninstall? */
        if (handle == handle_bad_irq) {
-               if (desc->chip != &no_irq_chip)
-                       mask_ack_irq(desc, irq);
+               if (desc->irq_data.chip != &no_irq_chip)
+                       mask_ack_irq(desc);
                desc->status |= IRQ_DISABLED;
                desc->depth = 1;
        }
@@ -706,10 +836,10 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
                desc->status &= ~IRQ_DISABLED;
                desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
                desc->depth = 0;
-               desc->chip->startup(irq);
+               desc->irq_data.chip->irq_startup(&desc->irq_data);
        }
        raw_spin_unlock_irqrestore(&desc->lock, flags);
-       chip_bus_sync_unlock(irq, desc);
+       chip_bus_sync_unlock(desc);
 }
 EXPORT_SYMBOL_GPL(__set_irq_handler);
 
@@ -729,32 +859,20 @@ set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
        __set_irq_handler(irq, handle, 0, name);
 }
 
-void set_irq_noprobe(unsigned int irq)
+void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
 {
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
 
-       if (!desc) {
-               printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq);
+       if (!desc)
                return;
-       }
 
-       raw_spin_lock_irqsave(&desc->lock, flags);
-       desc->status |= IRQ_NOPROBE;
-       raw_spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-void set_irq_probe(unsigned int irq)
-{
-       struct irq_desc *desc = irq_to_desc(irq);
-       unsigned long flags;
-
-       if (!desc) {
-               printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq);
-               return;
-       }
+       /* Sanitize flags */
+       set &= IRQF_MODIFY_MASK;
+       clr &= IRQF_MODIFY_MASK;
 
        raw_spin_lock_irqsave(&desc->lock, flags);
-       desc->status &= ~IRQ_NOPROBE;
+       desc->status &= ~clr;
+       desc->status |= set;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
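
The bulk of the chip.c changes is the compat layer: irq_chip_set_defaults() now wraps every old-style callback (taking a bare irq number) in a matching irq_* function that takes struct irq_data, so unconverted chips keep working until an architecture selects GENERIC_HARDIRQS_NO_DEPRECATED. A sketch of what this means for a legacy chip (the chip itself is hypothetical; the fixups are the ones added above):

    /* Legacy chip, still using the old unsigned-int callbacks (illustrative). */
    static struct irq_chip legacy_pic = {
            .name   = "legacy-pic",
            .mask   = legacy_pic_mask,      /* void (*)(unsigned int irq) */
            .unmask = legacy_pic_unmask,
            .ack    = legacy_pic_ack,
    };

    /*
     * After set_irq_chip() calls irq_chip_set_defaults(), the core fills in
     *
     *      legacy_pic.irq_mask   = compat_irq_mask;    (calls chip->mask(data->irq))
     *      legacy_pic.irq_unmask = compat_irq_unmask;
     *      legacy_pic.irq_ack    = compat_irq_ack;
     *
     * so the flow handlers only ever invoke the irq_data based variants.
     */
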
diff --git a/kernel/irq/dummychip.c b/kernel/irq/dummychip.c
new file mode 100644 (file)
index 0000000..918dea9
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
+ * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
+ *
+ * This file contains the dummy interrupt chip implementation
+ */
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include "internals.h"
+
+/*
+ * What should we do if we get a hw irq event on an illegal vector?
+ * Each architecture has to answer this itself.
+ */
+static void ack_bad(struct irq_data *data)
+{
+       struct irq_desc *desc = irq_data_to_desc(data);
+
+       print_irq_desc(data->irq, desc);
+       ack_bad_irq(data->irq);
+}
+
+/*
+ * NOP functions
+ */
+static void noop(struct irq_data *data) { }
+
+static unsigned int noop_ret(struct irq_data *data)
+{
+       return 0;
+}
+
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_CRUFT
+static void compat_noop(unsigned int irq) { }
+#define END_INIT .end = compat_noop
+#else
+#define END_INIT
+#endif
+
+/*
+ * Generic no controller implementation
+ */
+struct irq_chip no_irq_chip = {
+       .name           = "none",
+       .irq_startup    = noop_ret,
+       .irq_shutdown   = noop,
+       .irq_enable     = noop,
+       .irq_disable    = noop,
+       .irq_ack        = ack_bad,
+       END_INIT
+};
+
+/*
+ * Generic dummy implementation which can be used for
+ * real dumb interrupt sources
+ */
+struct irq_chip dummy_irq_chip = {
+       .name           = "dummy",
+       .irq_startup    = noop_ret,
+       .irq_shutdown   = noop,
+       .irq_enable     = noop,
+       .irq_disable    = noop,
+       .irq_ack        = noop,
+       .irq_mask       = noop,
+       .irq_unmask     = noop,
+       END_INIT
+};
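
no_irq_chip and dummy_irq_chip above show the shape of a fully converted chip: every callback takes a struct irq_data pointer and can fetch its per-chip state through it. A minimal sketch of a converted driver chip, assuming a hypothetical memory-mapped controller (struct myctl, the register offsets and the irq_base handling are invented):

    static void myctl_irq_mask(struct irq_data *d)
    {
            struct myctl *ctl = irq_data_get_irq_chip_data(d);

            writel(BIT(d->irq - ctl->irq_base), ctl->base + MYCTL_MASK_SET);
    }

    static void myctl_irq_unmask(struct irq_data *d)
    {
            struct myctl *ctl = irq_data_get_irq_chip_data(d);

            writel(BIT(d->irq - ctl->irq_base), ctl->base + MYCTL_MASK_CLR);
    }

    static struct irq_chip myctl_chip = {
            .name           = "myctl",
            .irq_mask       = myctl_irq_mask,
            .irq_unmask     = myctl_irq_unmask,
    };
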
index 27e5c69112235c2f0cebe18981bb1f5504ed6807..e2347eb6330682501e99bb3214ccfa9015a8a521 100644 (file)
  */
 
 #include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/module.h>
 #include <linux/random.h>
+#include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
-#include <linux/rculist.h>
-#include <linux/hash.h>
-#include <linux/radix-tree.h>
+
 #include <trace/events/irq.h>
 
 #include "internals.h"
 
-/*
- * lockdep: we want to handle all irq_desc locks as a single lock-class:
- */
-struct lock_class_key irq_desc_lock_class;
-
 /**
  * handle_bad_irq - handle spurious and unhandled irqs
  * @irq:       the interrupt number
@@ -43,304 +34,6 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
        ack_bad_irq(irq);
 }
 
-#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
-static void __init init_irq_default_affinity(void)
-{
-       alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
-       cpumask_setall(irq_default_affinity);
-}
-#else
-static void __init init_irq_default_affinity(void)
-{
-}
-#endif
-
-/*
- * Linux has a controller-independent interrupt architecture.
- * Every controller has a 'controller-template', that is used
- * by the main code to do the right thing. Each driver-visible
- * interrupt source is transparently wired to the appropriate
- * controller. Thus drivers need not be aware of the
- * interrupt-controller.
- *
- * The code is designed to be easily extended with new/different
- * interrupt controllers, without having to do assembly magic or
- * having to touch the generic code.
- *
- * Controller mappings for all interrupt sources:
- */
-int nr_irqs = NR_IRQS;
-EXPORT_SYMBOL_GPL(nr_irqs);
-
-#ifdef CONFIG_SPARSE_IRQ
-
-static struct irq_desc irq_desc_init = {
-       .irq        = -1,
-       .status     = IRQ_DISABLED,
-       .chip       = &no_irq_chip,
-       .handle_irq = handle_bad_irq,
-       .depth      = 1,
-       .lock       = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-};
-
-void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
-{
-       void *ptr;
-
-       ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
-                          GFP_ATOMIC, node);
-
-       /*
-        * don't overwite if can not get new one
-        * init_copy_kstat_irqs() could still use old one
-        */
-       if (ptr) {
-               printk(KERN_DEBUG "  alloc kstat_irqs on node %d\n", node);
-               desc->kstat_irqs = ptr;
-       }
-}
-
-static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
-{
-       memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
-
-       raw_spin_lock_init(&desc->lock);
-       desc->irq = irq;
-#ifdef CONFIG_SMP
-       desc->node = node;
-#endif
-       lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-       init_kstat_irqs(desc, node, nr_cpu_ids);
-       if (!desc->kstat_irqs) {
-               printk(KERN_ERR "can not alloc kstat_irqs\n");
-               BUG_ON(1);
-       }
-       if (!alloc_desc_masks(desc, node, false)) {
-               printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
-               BUG_ON(1);
-       }
-       init_desc_masks(desc);
-       arch_init_chip_data(desc, node);
-}
-
-/*
- * Protect the sparse_irqs:
- */
-DEFINE_RAW_SPINLOCK(sparse_irq_lock);
-
-static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
-
-static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
-{
-       radix_tree_insert(&irq_desc_tree, irq, desc);
-}
-
-struct irq_desc *irq_to_desc(unsigned int irq)
-{
-       return radix_tree_lookup(&irq_desc_tree, irq);
-}
-
-void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
-{
-       void **ptr;
-
-       ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
-       if (ptr)
-               radix_tree_replace_slot(ptr, desc);
-}
-
-static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
-       [0 ... NR_IRQS_LEGACY-1] = {
-               .irq        = -1,
-               .status     = IRQ_DISABLED,
-               .chip       = &no_irq_chip,
-               .handle_irq = handle_bad_irq,
-               .depth      = 1,
-               .lock       = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-       }
-};
-
-static unsigned int *kstat_irqs_legacy;
-
-int __init early_irq_init(void)
-{
-       struct irq_desc *desc;
-       int legacy_count;
-       int node;
-       int i;
-
-       init_irq_default_affinity();
-
-        /* initialize nr_irqs based on nr_cpu_ids */
-       arch_probe_nr_irqs();
-       printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
-
-       desc = irq_desc_legacy;
-       legacy_count = ARRAY_SIZE(irq_desc_legacy);
-       node = first_online_node;
-
-       /* allocate based on nr_cpu_ids */
-       kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
-                                         sizeof(int), GFP_NOWAIT, node);
-
-       for (i = 0; i < legacy_count; i++) {
-               desc[i].irq = i;
-#ifdef CONFIG_SMP
-               desc[i].node = node;
-#endif
-               desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
-               lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-               alloc_desc_masks(&desc[i], node, true);
-               init_desc_masks(&desc[i]);
-               set_irq_desc(i, &desc[i]);
-       }
-
-       return arch_early_irq_init();
-}
-
-struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
-{
-       struct irq_desc *desc;
-       unsigned long flags;
-
-       if (irq >= nr_irqs) {
-               WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
-                       irq, nr_irqs);
-               return NULL;
-       }
-
-       desc = irq_to_desc(irq);
-       if (desc)
-               return desc;
-
-       raw_spin_lock_irqsave(&sparse_irq_lock, flags);
-
-       /* We have to check it to avoid races with another CPU */
-       desc = irq_to_desc(irq);
-       if (desc)
-               goto out_unlock;
-
-       desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
-
-       printk(KERN_DEBUG "  alloc irq_desc for %d on node %d\n", irq, node);
-       if (!desc) {
-               printk(KERN_ERR "can not alloc irq_desc\n");
-               BUG_ON(1);
-       }
-       init_one_irq_desc(irq, desc, node);
-
-       set_irq_desc(irq, desc);
-
-out_unlock:
-       raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
-
-       return desc;
-}
-
-#else /* !CONFIG_SPARSE_IRQ */
-
-struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
-       [0 ... NR_IRQS-1] = {
-               .status = IRQ_DISABLED,
-               .chip = &no_irq_chip,
-               .handle_irq = handle_bad_irq,
-               .depth = 1,
-               .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
-       }
-};
-
-static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
-int __init early_irq_init(void)
-{
-       struct irq_desc *desc;
-       int count;
-       int i;
-
-       init_irq_default_affinity();
-
-       printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
-
-       desc = irq_desc;
-       count = ARRAY_SIZE(irq_desc);
-
-       for (i = 0; i < count; i++) {
-               desc[i].irq = i;
-               alloc_desc_masks(&desc[i], 0, true);
-               init_desc_masks(&desc[i]);
-               desc[i].kstat_irqs = kstat_irqs_all[i];
-       }
-       return arch_early_irq_init();
-}
-
-struct irq_desc *irq_to_desc(unsigned int irq)
-{
-       return (irq < NR_IRQS) ? irq_desc + irq : NULL;
-}
-
-struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
-{
-       return irq_to_desc(irq);
-}
-#endif /* !CONFIG_SPARSE_IRQ */
-
-void clear_kstat_irqs(struct irq_desc *desc)
-{
-       memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
-}
-
-/*
- * What should we do if we get a hw irq event on an illegal vector?
- * Each architecture has to answer this themself.
- */
-static void ack_bad(unsigned int irq)
-{
-       struct irq_desc *desc = irq_to_desc(irq);
-
-       print_irq_desc(irq, desc);
-       ack_bad_irq(irq);
-}
-
-/*
- * NOP functions
- */
-static void noop(unsigned int irq)
-{
-}
-
-static unsigned int noop_ret(unsigned int irq)
-{
-       return 0;
-}
-
-/*
- * Generic no controller implementation
- */
-struct irq_chip no_irq_chip = {
-       .name           = "none",
-       .startup        = noop_ret,
-       .shutdown       = noop,
-       .enable         = noop,
-       .disable        = noop,
-       .ack            = ack_bad,
-       .end            = noop,
-};
-
-/*
- * Generic dummy implementation which can be used for
- * real dumb interrupt sources
- */
-struct irq_chip dummy_irq_chip = {
-       .name           = "dummy",
-       .startup        = noop_ret,
-       .shutdown       = noop,
-       .enable         = noop,
-       .disable        = noop,
-       .ack            = noop,
-       .mask           = noop,
-       .unmask         = noop,
-       .end            = noop,
-};
-
 /*
  * Special, empty irq handler:
  */
@@ -457,20 +150,20 @@ unsigned int __do_IRQ(unsigned int irq)
                /*
                 * No locking required for CPU-local interrupts:
                 */
-               if (desc->chip->ack)
-                       desc->chip->ack(irq);
+               if (desc->irq_data.chip->ack)
+                       desc->irq_data.chip->ack(irq);
                if (likely(!(desc->status & IRQ_DISABLED))) {
                        action_ret = handle_IRQ_event(irq, desc->action);
                        if (!noirqdebug)
                                note_interrupt(irq, desc, action_ret);
                }
-               desc->chip->end(irq);
+               desc->irq_data.chip->end(irq);
                return 1;
        }
 
        raw_spin_lock(&desc->lock);
-       if (desc->chip->ack)
-               desc->chip->ack(irq);
+       if (desc->irq_data.chip->ack)
+               desc->irq_data.chip->ack(irq);
        /*
         * REPLAY is when Linux resends an IRQ that was dropped earlier
         * WAITING is used by probe to mark irqs that are being tested
@@ -530,27 +223,9 @@ out:
         * The ->end() handler has to deal with interrupts which got
         * disabled while the handler was running.
         */
-       desc->chip->end(irq);
+       desc->irq_data.chip->end(irq);
        raw_spin_unlock(&desc->lock);
 
        return 1;
 }
 #endif
-
-void early_init_irq_lock_class(void)
-{
-       struct irq_desc *desc;
-       int i;
-
-       for_each_irq_desc(i, desc) {
-               lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-       }
-}
-
-unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
-{
-       struct irq_desc *desc = irq_to_desc(irq);
-       return desc ? desc->kstat_irqs[cpu] : 0;
-}
-EXPORT_SYMBOL(kstat_irqs_cpu);
-
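
Note that __do_IRQ() keeps calling the old unsigned-int callbacks; only the path to them changed (desc->chip became desc->irq_data.chip). The two callback styles now coexist in struct irq_chip roughly like this (simplified excerpt, not the full structure):

    /* Old-style members, used only by __do_IRQ() and the compat wrappers: */
    void    (*ack)(unsigned int irq);
    void    (*end)(unsigned int irq);

    /* New-style members invoked by the flow handlers: */
    void    (*irq_ack)(struct irq_data *data);
    /* ->end() has no irq_* equivalent; the flow handlers do not need it. */
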
index c63f3bc88f0b77727a45a2a8d44936d797aeaa01..8eb01e379ccc745de14f12855b755cf575dc21ba 100644 (file)
@@ -1,9 +1,12 @@
 /*
  * IRQ subsystem internal functions and variables:
  */
+#include <linux/irqdesc.h>
 
 extern int noirqdebug;
 
+#define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data)
+
 /* Set default functions for irq_chip structures: */
 extern void irq_chip_set_defaults(struct irq_chip *chip);
 
@@ -20,16 +23,21 @@ extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
 extern void clear_kstat_irqs(struct irq_desc *desc);
 extern raw_spinlock_t sparse_irq_lock;
 
+/* Resending of interrupts :*/
+void check_irq_resend(struct irq_desc *desc, unsigned int irq);
+
 #ifdef CONFIG_SPARSE_IRQ
 void replace_irq_desc(unsigned int irq, struct irq_desc *desc);
 #endif
 
 #ifdef CONFIG_PROC_FS
 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
+extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc);
 extern void register_handler_proc(unsigned int irq, struct irqaction *action);
 extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
 #else
 static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
+static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { }
 static inline void register_handler_proc(unsigned int irq,
                                         struct irqaction *action) { }
 static inline void unregister_handler_proc(unsigned int irq,
@@ -40,17 +48,27 @@ extern int irq_select_affinity_usr(unsigned int irq);
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);
 
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
+static inline void irq_end(unsigned int irq, struct irq_desc *desc)
+{
+       if (desc->irq_data.chip && desc->irq_data.chip->end)
+               desc->irq_data.chip->end(irq);
+}
+#else
+static inline void irq_end(unsigned int irq, struct irq_desc *desc) { }
+#endif
+
 /* Inline functions for support of irq chips on slow busses */
-static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc)
+static inline void chip_bus_lock(struct irq_desc *desc)
 {
-       if (unlikely(desc->chip->bus_lock))
-               desc->chip->bus_lock(irq);
+       if (unlikely(desc->irq_data.chip->irq_bus_lock))
+               desc->irq_data.chip->irq_bus_lock(&desc->irq_data);
 }
 
-static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc)
+static inline void chip_bus_sync_unlock(struct irq_desc *desc)
 {
-       if (unlikely(desc->chip->bus_sync_unlock))
-               desc->chip->bus_sync_unlock(irq);
+       if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock))
+               desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
 }
 
 /*
@@ -67,8 +85,8 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
                irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
        printk("->handle_irq():  %p, ", desc->handle_irq);
        print_symbol("%s\n", (unsigned long)desc->handle_irq);
-       printk("->chip(): %p, ", desc->chip);
-       print_symbol("%s\n", (unsigned long)desc->chip);
+       printk("->irq_data.chip(): %p, ", desc->irq_data.chip);
+       print_symbol("%s\n", (unsigned long)desc->irq_data.chip);
        printk("->action(): %p\n", desc->action);
        if (desc->action) {
                printk("->action->handler(): %p, ", desc->action->handler);
@@ -93,3 +111,99 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
 
 #undef P
 
+/* Stuff below will be cleaned up after the sparse allocator is done */
+
+#ifdef CONFIG_SMP
+/**
+ * alloc_desc_masks - allocate cpumasks for irq_desc
+ * @desc:      pointer to irq_desc struct
+ * @node:      node which will be handling the cpumasks
+ * @boot:      true if need bootmem
+ *
+ * Allocates affinity and pending_mask cpumask if required.
+ * Returns true if successful (or not required).
+ */
+static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
+                                                       bool boot)
+{
+       gfp_t gfp = GFP_ATOMIC;
+
+       if (boot)
+               gfp = GFP_NOWAIT;
+
+#ifdef CONFIG_CPUMASK_OFFSTACK
+       if (!alloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
+               return false;
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+       if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
+               free_cpumask_var(desc->irq_data.affinity);
+               return false;
+       }
+#endif
+#endif
+       return true;
+}
+
+static inline void init_desc_masks(struct irq_desc *desc)
+{
+       cpumask_setall(desc->irq_data.affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+       cpumask_clear(desc->pending_mask);
+#endif
+}
+
+/**
+ * init_copy_desc_masks - copy cpumasks for irq_desc
+ * @old_desc:  pointer to old irq_desc struct
+ * @new_desc:  pointer to new irq_desc struct
+ *
+ * Ensures affinity and pending_mask are copied to the new irq_desc.
+ * If !CONFIG_CPUMASK_OFFSTACK the cpumasks are embedded in the
+ * irq_desc struct so the copy is redundant.
+ */
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+                                       struct irq_desc *new_desc)
+{
+#ifdef CONFIG_CPUMASK_OFFSTACK
+       cpumask_copy(new_desc->irq_data.affinity, old_desc->irq_data.affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+       cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
+#endif
+#endif
+}
+
+static inline void free_desc_masks(struct irq_desc *old_desc,
+                                  struct irq_desc *new_desc)
+{
+       free_cpumask_var(old_desc->irq_data.affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+       free_cpumask_var(old_desc->pending_mask);
+#endif
+}
+
+#else /* !CONFIG_SMP */
+
+static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
+                                                               bool boot)
+{
+       return true;
+}
+
+static inline void init_desc_masks(struct irq_desc *desc)
+{
+}
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+                                       struct irq_desc *new_desc)
+{
+}
+
+static inline void free_desc_masks(struct irq_desc *old_desc,
+                                  struct irq_desc *new_desc)
+{
+}
+#endif /* CONFIG_SMP */
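
chip_bus_lock()/chip_bus_sync_unlock() now take only the descriptor. They bracket descriptor updates for chips that sit behind slow busses (e.g. I2C GPIO expanders), where the real hardware access has to wait until the sleeping bus lock can be taken. The calling pattern used by the core, simplified from __set_irq_handler() above:

    chip_bus_lock(desc);                    /* may sleep; taken outside desc->lock */
    raw_spin_lock_irqsave(&desc->lock, flags);

    /* ... update the descriptor / program the chip via desc->irq_data ... */

    raw_spin_unlock_irqrestore(&desc->lock, flags);
    chip_bus_sync_unlock(desc);             /* pushes queued accesses out to the bus */
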
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
new file mode 100644 (file)
index 0000000..a1fbd1d
--- /dev/null
@@ -0,0 +1,424 @@
+/*
+ * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
+ * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
+ *
+ * This file contains the interrupt descriptor management code
+ *
+ * Detailed information is available in Documentation/DocBook/genericirq
+ *
+ */
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/radix-tree.h>
+#include <linux/bitmap.h>
+
+#include "internals.h"
+
+/*
+ * lockdep: we want to handle all irq_desc locks as a single lock-class:
+ */
+struct lock_class_key irq_desc_lock_class;
+
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
+static void __init init_irq_default_affinity(void)
+{
+       alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
+       cpumask_setall(irq_default_affinity);
+}
+#else
+static void __init init_irq_default_affinity(void)
+{
+}
+#endif
+
+#ifdef CONFIG_SMP
+static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
+{
+       if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
+               return -ENOMEM;
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+       if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
+               free_cpumask_var(desc->irq_data.affinity);
+               return -ENOMEM;
+       }
+#endif
+       return 0;
+}
+
+static void desc_smp_init(struct irq_desc *desc, int node)
+{
+       desc->irq_data.node = node;
+       cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
+}
+
+#else
+static inline int
+alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
+static inline void desc_smp_init(struct irq_desc *desc, int node) { }
+#endif
+
+static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
+{
+       desc->irq_data.irq = irq;
+       desc->irq_data.chip = &no_irq_chip;
+       desc->irq_data.chip_data = NULL;
+       desc->irq_data.handler_data = NULL;
+       desc->irq_data.msi_desc = NULL;
+       desc->status = IRQ_DEFAULT_INIT_FLAGS;
+       desc->handle_irq = handle_bad_irq;
+       desc->depth = 1;
+       desc->name = NULL;
+       memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
+       desc_smp_init(desc, node);
+}
+
+int nr_irqs = NR_IRQS;
+EXPORT_SYMBOL_GPL(nr_irqs);
+
+DEFINE_RAW_SPINLOCK(sparse_irq_lock);
+static DECLARE_BITMAP(allocated_irqs, NR_IRQS);
+
+#ifdef CONFIG_SPARSE_IRQ
+
+void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
+{
+       void *ptr;
+
+       ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
+                          GFP_ATOMIC, node);
+
+       /*
+        * Don't overwrite if we can not get a new one;
+        * init_copy_kstat_irqs() could still use the old one.
+        */
+       if (ptr) {
+               printk(KERN_DEBUG "  alloc kstat_irqs on node %d\n", node);
+               desc->kstat_irqs = ptr;
+       }
+}
+
+static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
+
+static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
+{
+       radix_tree_insert(&irq_desc_tree, irq, desc);
+}
+
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+       return radix_tree_lookup(&irq_desc_tree, irq);
+}
+
+void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+       void **ptr;
+
+       ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
+       if (ptr)
+               radix_tree_replace_slot(ptr, desc);
+}
+
+static void delete_irq_desc(unsigned int irq)
+{
+       radix_tree_delete(&irq_desc_tree, irq);
+}
+
+#ifdef CONFIG_SMP
+static void free_masks(struct irq_desc *desc)
+{
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+       free_cpumask_var(desc->pending_mask);
+#endif
+       free_cpumask_var(desc->affinity);
+}
+#else
+static inline void free_masks(struct irq_desc *desc) { }
+#endif
+
+static struct irq_desc *alloc_desc(int irq, int node)
+{
+       /* Temporary hack until we can switch to GFP_KERNEL */
+       gfp_t gfp = gfp_allowed_mask == GFP_BOOT_MASK ? GFP_NOWAIT : GFP_ATOMIC;
+       struct irq_desc *desc;
+
+       desc = kzalloc_node(sizeof(*desc), gfp, node);
+       if (!desc)
+               return NULL;
+       /* allocate based on nr_cpu_ids */
+       desc->kstat_irqs = kzalloc_node(nr_cpu_ids * sizeof(*desc->kstat_irqs),
+                                        gfp, node);
+       if (!desc->kstat_irqs)
+               goto err_desc;
+
+       if (alloc_masks(desc, gfp, node))
+               goto err_kstat;
+
+       raw_spin_lock_init(&desc->lock);
+       lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+
+       desc_set_defaults(irq, desc, node);
+
+       return desc;
+
+err_kstat:
+       kfree(desc->kstat_irqs);
+err_desc:
+       kfree(desc);
+       return NULL;
+}
+
+static void free_desc(unsigned int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       unsigned long flags;
+
+       unregister_irq_proc(irq, desc);
+
+       raw_spin_lock_irqsave(&sparse_irq_lock, flags);
+       delete_irq_desc(irq);
+       raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
+
+       free_masks(desc);
+       kfree(desc->kstat_irqs);
+       kfree(desc);
+}
+
+static int alloc_descs(unsigned int start, unsigned int cnt, int node)
+{
+       struct irq_desc *desc;
+       unsigned long flags;
+       int i;
+
+       for (i = 0; i < cnt; i++) {
+               desc = alloc_desc(start + i, node);
+               if (!desc)
+                       goto err;
+               /* temporary until the x86 madness is fixed */
+               arch_init_chip_data(desc, node);
+               raw_spin_lock_irqsave(&sparse_irq_lock, flags);
+               irq_insert_desc(start + i, desc);
+               raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
+       }
+       return start;
+
+err:
+       for (i--; i >= 0; i--)
+               free_desc(start + i);
+
+       raw_spin_lock_irqsave(&sparse_irq_lock, flags);
+       bitmap_clear(allocated_irqs, start, cnt);
+       raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
+       return -ENOMEM;
+}
+
+struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
+{
+       int res = irq_alloc_descs(irq, irq, 1, node);
+
+       if (res == -EEXIST || res == irq)
+               return irq_to_desc(irq);
+       return NULL;
+}
+
+int __init early_irq_init(void)
+{
+       int i, initcnt, node = first_online_node;
+       struct irq_desc *desc;
+
+       init_irq_default_affinity();
+
+       /* Let arch update nr_irqs and return the nr of preallocated irqs */
+       initcnt = arch_probe_nr_irqs();
+       printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);
+
+       for (i = 0; i < initcnt; i++) {
+               desc = alloc_desc(i, node);
+               set_bit(i, allocated_irqs);
+               irq_insert_desc(i, desc);
+       }
+       return arch_early_irq_init();
+}
+
+#else /* !CONFIG_SPARSE_IRQ */
+
+struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
+       [0 ... NR_IRQS-1] = {
+               .status         = IRQ_DEFAULT_INIT_FLAGS,
+               .handle_irq     = handle_bad_irq,
+               .depth          = 1,
+               .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
+       }
+};
+
+static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
+int __init early_irq_init(void)
+{
+       int count, i, node = first_online_node;
+       struct irq_desc *desc;
+
+       init_irq_default_affinity();
+
+       printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+
+       desc = irq_desc;
+       count = ARRAY_SIZE(irq_desc);
+
+       for (i = 0; i < count; i++) {
+               desc[i].irq_data.irq = i;
+               desc[i].irq_data.chip = &no_irq_chip;
+               desc[i].kstat_irqs = kstat_irqs_all[i];
+               alloc_masks(desc + i, GFP_KERNEL, node);
+               desc_smp_init(desc + i, node);
+               lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
+       }
+       return arch_early_irq_init();
+}
+
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+       return (irq < NR_IRQS) ? irq_desc + irq : NULL;
+}
+
+struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
+{
+       return irq_to_desc(irq);
+}
+
+#ifdef CONFIG_SMP
+static inline int desc_node(struct irq_desc *desc)
+{
+       return desc->irq_data.node;
+}
+#else
+static inline int desc_node(struct irq_desc *desc) { return 0; }
+#endif
+
+static void free_desc(unsigned int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&desc->lock, flags);
+       desc_set_defaults(irq, desc, desc_node(desc));
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
+{
+       return start;
+}
+#endif /* !CONFIG_SPARSE_IRQ */
+
+/* Dynamic interrupt handling */
+
+/**
+ * irq_free_descs - free irq descriptors
+ * @from:      Start of descriptor range
+ * @cnt:       Number of consecutive irqs to free
+ */
+void irq_free_descs(unsigned int from, unsigned int cnt)
+{
+       unsigned long flags;
+       int i;
+
+       if (from >= nr_irqs || (from + cnt) > nr_irqs)
+               return;
+
+       for (i = 0; i < cnt; i++)
+               free_desc(from + i);
+
+       raw_spin_lock_irqsave(&sparse_irq_lock, flags);
+       bitmap_clear(allocated_irqs, from, cnt);
+       raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
+}
+
+/**
+ * irq_alloc_descs - allocate and initialize a range of irq descriptors
+ * @irq:       Allocate for specific irq number if irq >= 0
+ * @from:      Start the search from this irq number
+ * @cnt:       Number of consecutive irqs to allocate.
+ * @node:      Preferred node on which the irq descriptor should be allocated
+ *
+ * Returns the first irq number of the allocated range on success, or a negative error code
+ */
+int __ref
+irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
+{
+       unsigned long flags;
+       int start, ret;
+
+       if (!cnt)
+               return -EINVAL;
+
+       raw_spin_lock_irqsave(&sparse_irq_lock, flags);
+
+       start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
+       ret = -EEXIST;
+       if (irq >= 0 && start != irq)
+               goto err;
+
+       ret = -ENOMEM;
+       if (start >= nr_irqs)
+               goto err;
+
+       bitmap_set(allocated_irqs, start, cnt);
+       raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
+       return alloc_descs(start, cnt, node);
+
+err:
+       raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
+       return ret;
+}
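
A minimal usage sketch (not part of this commit) of the allocator above; the count, the node choice and the error handling are illustrative only:

	static int example_grab_irq_range(void)
	{
		int base;

		/* irq < 0 lets the allocator pick the first free range */
		base = irq_alloc_descs(-1, 0, 4, numa_node_id());
		if (base < 0)
			return base;	/* -ENOMEM once the irq space is exhausted */

		/* ... install chips and handlers for base .. base + 3 ... */

		irq_free_descs(base, 4);
		return 0;
	}

Requesting a specific number (irq >= 0, with @from set to the same value) pins the allocation to that irq and fails with -EEXIST if the range is already occupied.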
+
+/**
+ * irq_reserve_irqs - mark irqs allocated
+ * @from:      first irq number to mark allocated
+ * @cnt:       number of consecutive irqs to mark
+ *
+ * Returns 0 on success or an appropriate error code
+ */
+int irq_reserve_irqs(unsigned int from, unsigned int cnt)
+{
+       unsigned long flags;
+       unsigned int start;
+       int ret = 0;
+
+       if (!cnt || (from + cnt) > nr_irqs)
+               return -EINVAL;
+
+       raw_spin_lock_irqsave(&sparse_irq_lock, flags);
+       start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
+       if (start == from)
+               bitmap_set(allocated_irqs, start, cnt);
+       else
+               ret = -EEXIST;
+       raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
+       return ret;
+}
+
+/**
+ * irq_get_next_irq - get next allocated irq number
+ * @offset:    where to start the search
+ *
+ * Returns the first allocated irq number at or above @offset, or nr_irqs if none is found.
+ */
+unsigned int irq_get_next_irq(unsigned int offset)
+{
+       return find_next_bit(allocated_irqs, nr_irqs, offset);
+}
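
A purely illustrative sketch of the two helpers above: an architecture could reserve its legacy vectors at boot so that dynamic allocations skip them, and debug code could walk the allocated space:

	static void __init example_reserve_and_walk(void)
	{
		unsigned int irq;

		/* the count of 16 legacy vectors is an assumption, not from this commit */
		if (irq_reserve_irqs(0, 16))
			printk(KERN_WARNING "legacy irq range already in use\n");

		for (irq = irq_get_next_irq(0); irq < nr_irqs;
		     irq = irq_get_next_irq(irq + 1))
			pr_debug("irq %u is allocated\n", irq);
	}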
+
+/* Statistics access */
+void clear_kstat_irqs(struct irq_desc *desc)
+{
+       memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
+}
+
+unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       return desc ? desc->kstat_irqs[cpu] : 0;
+}
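
kstat_irqs_cpu() keeps its per-cpu semantics; a caller wanting the total for one interrupt sums it over the possible cpus. A minimal sketch:

	static unsigned int example_kstat_irqs_sum(unsigned int irq)
	{
		unsigned int cpu, sum = 0;

		for_each_possible_cpu(cpu)
			sum += kstat_irqs_cpu(irq, cpu);
		return sum;
	}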
index c3003e9d91a37da04c8c7ffc9aa5f986fc90fb04..644e8d5fa367e74c3cc06a2114f2e283c701b0e6 100644 (file)
@@ -73,8 +73,8 @@ int irq_can_set_affinity(unsigned int irq)
 {
        struct irq_desc *desc = irq_to_desc(irq);
 
-       if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
-           !desc->chip->set_affinity)
+       if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip ||
+           !desc->irq_data.chip->irq_set_affinity)
                return 0;
 
        return 1;
@@ -109,17 +109,18 @@ void irq_set_thread_affinity(struct irq_desc *desc)
 int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 {
        struct irq_desc *desc = irq_to_desc(irq);
+       struct irq_chip *chip = desc->irq_data.chip;
        unsigned long flags;
 
-       if (!desc->chip->set_affinity)
+       if (!chip->irq_set_affinity)
                return -EINVAL;
 
        raw_spin_lock_irqsave(&desc->lock, flags);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
        if (desc->status & IRQ_MOVE_PCNTXT) {
-               if (!desc->chip->set_affinity(irq, cpumask)) {
-                       cpumask_copy(desc->affinity, cpumask);
+               if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
+                       cpumask_copy(desc->irq_data.affinity, cpumask);
                        irq_set_thread_affinity(desc);
                }
        }
@@ -128,8 +129,8 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
                cpumask_copy(desc->pending_mask, cpumask);
        }
 #else
-       if (!desc->chip->set_affinity(irq, cpumask)) {
-               cpumask_copy(desc->affinity, cpumask);
+       if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
+               cpumask_copy(desc->irq_data.affinity, cpumask);
                irq_set_thread_affinity(desc);
        }
 #endif
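
The driver-visible irq_set_affinity() keeps its old prototype; only the call into the chip switches to the irq_data variant. A hypothetical usage sketch, with dev_irq standing in for whatever irq number the driver holds:

	if (irq_can_set_affinity(dev_irq))
		irq_set_affinity(dev_irq, cpumask_of(2));	/* best effort: pin to CPU 2 */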
@@ -168,16 +169,16 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc)
         * one of the targets is online.
         */
        if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-               if (cpumask_any_and(desc->affinity, cpu_online_mask)
+               if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask)
                    < nr_cpu_ids)
                        goto set_affinity;
                else
                        desc->status &= ~IRQ_AFFINITY_SET;
        }
 
-       cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
+       cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity);
 set_affinity:
-       desc->chip->set_affinity(irq, desc->affinity);
+       desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false);
 
        return 0;
 }
@@ -223,7 +224,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
 
        if (!desc->depth++) {
                desc->status |= IRQ_DISABLED;
-               desc->chip->disable(irq);
+               desc->irq_data.chip->irq_disable(&desc->irq_data);
        }
 }
 
@@ -246,11 +247,11 @@ void disable_irq_nosync(unsigned int irq)
        if (!desc)
                return;
 
-       chip_bus_lock(irq, desc);
+       chip_bus_lock(desc);
        raw_spin_lock_irqsave(&desc->lock, flags);
        __disable_irq(desc, irq, false);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
-       chip_bus_sync_unlock(irq, desc);
+       chip_bus_sync_unlock(desc);
 }
 EXPORT_SYMBOL(disable_irq_nosync);
 
@@ -313,7 +314,7 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
  *     IRQ line is re-enabled.
  *
  *     This function may be called from IRQ context only when
- *     desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
+ *     desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
  */
 void enable_irq(unsigned int irq)
 {
@@ -323,11 +324,11 @@ void enable_irq(unsigned int irq)
        if (!desc)
                return;
 
-       chip_bus_lock(irq, desc);
+       chip_bus_lock(desc);
        raw_spin_lock_irqsave(&desc->lock, flags);
        __enable_irq(desc, irq, false);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
-       chip_bus_sync_unlock(irq, desc);
+       chip_bus_sync_unlock(desc);
 }
 EXPORT_SYMBOL(enable_irq);
 
@@ -336,8 +337,8 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
        struct irq_desc *desc = irq_to_desc(irq);
        int ret = -ENXIO;
 
-       if (desc->chip->set_wake)
-               ret = desc->chip->set_wake(irq, on);
+       if (desc->irq_data.chip->irq_set_wake)
+               ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
 
        return ret;
 }
@@ -429,12 +430,12 @@ void compat_irq_chip_set_default_handler(struct irq_desc *desc)
 }
 
 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
-               unsigned long flags)
+                     unsigned long flags)
 {
        int ret;
-       struct irq_chip *chip = desc->chip;
+       struct irq_chip *chip = desc->irq_data.chip;
 
-       if (!chip || !chip->set_type) {
+       if (!chip || !chip->irq_set_type) {
                /*
                 * IRQF_TRIGGER_* but the PIC does not support multiple
                 * flow-types?
@@ -445,11 +446,11 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
        }
 
        /* caller masked out all except trigger mode flags */
-       ret = chip->set_type(irq, flags);
+       ret = chip->irq_set_type(&desc->irq_data, flags);
 
        if (ret)
-               pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
-                               (int)flags, irq, chip->set_type);
+               pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
+                      flags, irq, chip->irq_set_type);
        else {
                if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
                        flags |= IRQ_LEVEL;
@@ -457,8 +458,8 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
                desc->status |= flags;
 
-               if (chip != desc->chip)
-                       irq_chip_set_defaults(desc->chip);
+               if (chip != desc->irq_data.chip)
+                       irq_chip_set_defaults(desc->irq_data.chip);
        }
 
        return ret;
@@ -507,7 +508,7 @@ static int irq_wait_for_interrupt(struct irqaction *action)
 static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
 {
 again:
-       chip_bus_lock(irq, desc);
+       chip_bus_lock(desc);
        raw_spin_lock_irq(&desc->lock);
 
        /*
@@ -521,17 +522,17 @@ again:
         */
        if (unlikely(desc->status & IRQ_INPROGRESS)) {
                raw_spin_unlock_irq(&desc->lock);
-               chip_bus_sync_unlock(irq, desc);
+               chip_bus_sync_unlock(desc);
                cpu_relax();
                goto again;
        }
 
        if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
                desc->status &= ~IRQ_MASKED;
-               desc->chip->unmask(irq);
+               desc->irq_data.chip->irq_unmask(&desc->irq_data);
        }
        raw_spin_unlock_irq(&desc->lock);
-       chip_bus_sync_unlock(irq, desc);
+       chip_bus_sync_unlock(desc);
 }
 
 #ifdef CONFIG_SMP
@@ -556,7 +557,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
        }
 
        raw_spin_lock_irq(&desc->lock);
-       cpumask_copy(mask, desc->affinity);
+       cpumask_copy(mask, desc->irq_data.affinity);
        raw_spin_unlock_irq(&desc->lock);
 
        set_cpus_allowed_ptr(current, mask);
@@ -657,7 +658,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
        if (!desc)
                return -EINVAL;
 
-       if (desc->chip == &no_irq_chip)
+       if (desc->irq_data.chip == &no_irq_chip)
                return -ENOSYS;
        /*
         * Some drivers like serial.c use request_irq() heavily,
@@ -752,7 +753,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
        }
 
        if (!shared) {
-               irq_chip_set_defaults(desc->chip);
+               irq_chip_set_defaults(desc->irq_data.chip);
 
                init_waitqueue_head(&desc->wait_for_threads);
 
@@ -779,7 +780,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                if (!(desc->status & IRQ_NOAUTOEN)) {
                        desc->depth = 0;
                        desc->status &= ~IRQ_DISABLED;
-                       desc->chip->startup(irq);
+                       desc->irq_data.chip->irq_startup(&desc->irq_data);
                } else
                        /* Undo nested disables: */
                        desc->depth = 1;
@@ -912,17 +913,17 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 
        /* Currently used only by UML, might disappear one day: */
 #ifdef CONFIG_IRQ_RELEASE_METHOD
-       if (desc->chip->release)
-               desc->chip->release(irq, dev_id);
+       if (desc->irq_data.chip->release)
+               desc->irq_data.chip->release(irq, dev_id);
 #endif
 
        /* If this was the last handler, shut down the IRQ line: */
        if (!desc->action) {
                desc->status |= IRQ_DISABLED;
-               if (desc->chip->shutdown)
-                       desc->chip->shutdown(irq);
+               if (desc->irq_data.chip->irq_shutdown)
+                       desc->irq_data.chip->irq_shutdown(&desc->irq_data);
                else
-                       desc->chip->disable(irq);
+                       desc->irq_data.chip->irq_disable(&desc->irq_data);
        }
 
 #ifdef CONFIG_SMP
@@ -997,9 +998,9 @@ void free_irq(unsigned int irq, void *dev_id)
        if (!desc)
                return;
 
-       chip_bus_lock(irq, desc);
+       chip_bus_lock(desc);
        kfree(__free_irq(irq, dev_id));
-       chip_bus_sync_unlock(irq, desc);
+       chip_bus_sync_unlock(desc);
 }
 EXPORT_SYMBOL(free_irq);
 
@@ -1086,9 +1087,9 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
        action->name = devname;
        action->dev_id = dev_id;
 
-       chip_bus_lock(irq, desc);
+       chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, action);
-       chip_bus_sync_unlock(irq, desc);
+       chip_bus_sync_unlock(desc);
 
        if (retval)
                kfree(action);
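
All of the former desc->chip->*(irq) calls in this file now go through the irq_data based methods. A rough sketch of the chip side of that conversion; the FOO names, register offsets and the 1:1 irq-to-bit mapping are invented for illustration:

	static void foo_irq_mask(struct irq_data *d)
	{
		void __iomem *base = d->chip_data;	/* stashed by the platform code */

		writel(1U << d->irq, base + 0x10);	/* 0x10: invented mask register */
	}

	static void foo_irq_unmask(struct irq_data *d)
	{
		void __iomem *base = d->chip_data;

		writel(1U << d->irq, base + 0x14);	/* 0x14: invented unmask register */
	}

	static struct irq_chip foo_chip = {
		.name		= "FOO",
		.irq_mask	= foo_irq_mask,
		.irq_unmask	= foo_irq_unmask,
	};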
index 241962280836ff73e8143dc8ebc63a83f53d9115..1d25419404803e60229dc15b1cc13980fe35db78 100644 (file)
@@ -7,6 +7,7 @@
 void move_masked_irq(int irq)
 {
        struct irq_desc *desc = irq_to_desc(irq);
+       struct irq_chip *chip = desc->irq_data.chip;
 
        if (likely(!(desc->status & IRQ_MOVE_PENDING)))
                return;
@@ -24,7 +25,7 @@ void move_masked_irq(int irq)
        if (unlikely(cpumask_empty(desc->pending_mask)))
                return;
 
-       if (!desc->chip->set_affinity)
+       if (!chip->irq_set_affinity)
                return;
 
        assert_raw_spin_locked(&desc->lock);
@@ -43,8 +44,9 @@ void move_masked_irq(int irq)
         */
        if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
                   < nr_cpu_ids))
-               if (!desc->chip->set_affinity(irq, desc->pending_mask)) {
-                       cpumask_copy(desc->affinity, desc->pending_mask);
+               if (!chip->irq_set_affinity(&desc->irq_data,
+                                           desc->pending_mask, false)) {
+                       cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
                        irq_set_thread_affinity(desc);
                }
 
@@ -61,8 +63,8 @@ void move_native_irq(int irq)
        if (unlikely(desc->status & IRQ_DISABLED))
                return;
 
-       desc->chip->mask(irq);
+       desc->irq_data.chip->irq_mask(&desc->irq_data);
        move_masked_irq(irq);
-       desc->chip->unmask(irq);
+       desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
index 65d3845665acad7ac0ad540c6f6f08805abf88eb..e7f1f16402c1d425ef781b177421723734e58f65 100644 (file)
@@ -44,7 +44,7 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
                return false;
        }
        raw_spin_lock_init(&desc->lock);
-       desc->node = node;
+       desc->irq_data.node = node;
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids);
        init_copy_desc_masks(old_desc, desc);
@@ -66,7 +66,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
        unsigned int irq;
        unsigned long flags;
 
-       irq = old_desc->irq;
+       irq = old_desc->irq_data.irq;
 
        raw_spin_lock_irqsave(&sparse_irq_lock, flags);
 
@@ -109,10 +109,10 @@ out_unlock:
 struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
 {
        /* do not move descriptors that are statically allocated or whose target node is -1 */
-       if (desc->irq < NR_IRQS_LEGACY || node == -1)
+       if (desc->irq_data.irq < NR_IRQS_LEGACY || node == -1)
                return desc;
 
-       if (desc->node != node)
+       if (desc->irq_data.node != node)
                desc = __real_move_irq_desc(desc, node);
 
        return desc;
index 09a2ee540bd246e127f07c653f262b22009488ae..01b1d3a88983f92c189e0c9b27f23ffe0e73f88f 100644 (file)
@@ -21,7 +21,7 @@ static struct proc_dir_entry *root_irq_dir;
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
        struct irq_desc *desc = irq_to_desc((long)m->private);
-       const struct cpumask *mask = desc->affinity;
+       const struct cpumask *mask = desc->irq_data.affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
        if (desc->status & IRQ_MOVE_PENDING)
@@ -65,7 +65,7 @@ static ssize_t irq_affinity_proc_write(struct file *file,
        cpumask_var_t new_value;
        int err;
 
-       if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity ||
+       if (!irq_to_desc(irq)->irq_data.chip->irq_set_affinity || no_irq_affinity ||
            irq_balancing_disabled(irq))
                return -EIO;
 
@@ -185,7 +185,7 @@ static int irq_node_proc_show(struct seq_file *m, void *v)
 {
        struct irq_desc *desc = irq_to_desc((long) m->private);
 
-       seq_printf(m, "%d\n", desc->node);
+       seq_printf(m, "%d\n", desc->irq_data.node);
        return 0;
 }
 
@@ -269,7 +269,7 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 {
        char name [MAX_NAMELEN];
 
-       if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir)
+       if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
                return;
 
        memset(name, 0, MAX_NAMELEN);
@@ -297,6 +297,24 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
                         &irq_spurious_proc_fops, (void *)(long)irq);
 }
 
+void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
+{
+       char name [MAX_NAMELEN];
+
+       if (!root_irq_dir || !desc->dir)
+               return;
+#ifdef CONFIG_SMP
+       remove_proc_entry("smp_affinity", desc->dir);
+       remove_proc_entry("affinity_hint", desc->dir);
+       remove_proc_entry("node", desc->dir);
+#endif
+       remove_proc_entry("spurious", desc->dir);
+
+       memset(name, 0, MAX_NAMELEN);
+       sprintf(name, "%u", irq);
+       remove_proc_entry(name, root_irq_dir);
+}
+
 #undef MAX_NAMELEN
 
 void unregister_handler_proc(unsigned int irq, struct irqaction *action)
index 090c3763f3a294143b47d8a89cefff884fc1b826..891115a929aa1dfe223b01c5f50efbc0ec35a2f7 100644 (file)
@@ -60,7 +60,7 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
        /*
         * Make sure the interrupt is enabled, before resending it:
         */
-       desc->chip->enable(irq);
+       desc->irq_data.chip->irq_enable(&desc->irq_data);
 
        /*
         * We do not resend level type interrupts. Level type
@@ -70,7 +70,8 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
        if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
                desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY;
 
-               if (!desc->chip->retrigger || !desc->chip->retrigger(irq)) {
+               if (!desc->irq_data.chip->irq_retrigger ||
+                   !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
 #ifdef CONFIG_HARDIRQS_SW_RESEND
                        /* Set it pending and activate the softirq: */
                        set_bit(irq, irqs_resend);
index 89fb90ae534f551defdda8e43d7f1d274cb12b9e..3089d3b9d5f3912643d49bf46960fe7938a46e6a 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/moduleparam.h>
 #include <linux/timer.h>
 
+#include "internals.h"
+
 static int irqfixup __read_mostly;
 
 #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
@@ -78,8 +80,8 @@ static int try_one_irq(int irq, struct irq_desc *desc)
         * If we did actual work for the real IRQ line we must let the
         * IRQ controller clean up too
         */
-       if (work && desc->chip && desc->chip->end)
-               desc->chip->end(irq);
+       if (work)
+               irq_end(irq, desc);
        raw_spin_unlock(&desc->lock);
 
        return ok;
@@ -254,7 +256,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
                printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
                desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
                desc->depth++;
-               desc->chip->disable(irq);
+               desc->irq_data.chip->irq_disable(&desc->irq_data);
 
                mod_timer(&poll_spurious_irq_timer,
                          jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
index 07b4f1b1a73a9b6a309a3e7fe249c813007b5d17..14a7b80b2cce9bddabf2b1111216e14d3c20e389 100644 (file)
@@ -886,9 +886,10 @@ int __init __weak early_irq_init(void)
        return 0;
 }
 
+#ifdef CONFIG_GENERIC_HARDIRQS
 int __init __weak arch_probe_nr_irqs(void)
 {
-       return 0;
+       return NR_IRQS_LEGACY;
 }
 
 int __init __weak arch_early_irq_init(void)
@@ -900,3 +901,4 @@ int __weak arch_init_chip_data(struct irq_desc *desc, int node)
 {
        return 0;
 }
+#endif
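
With these weak defaults an architecture that does nothing gets NR_IRQS_LEGACY descriptors preallocated. A sketch of how a port might override the hooks; the numbers are made up, not taken from any real architecture:

	int __init arch_probe_nr_irqs(void)
	{
		nr_irqs = 256;		/* upper bound for this platform */
		return 16;		/* legacy descriptors to preallocate */
	}

	int __init arch_early_irq_init(void)
	{
		return 0;		/* nothing further to set up */
	}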