x86/Voyager: make it build and boot
author    James Bottomley <James.Bottomley@HansenPartnership.com>
          Sat, 31 Jan 2009 16:24:43 +0000 (17:24 +0100)
committer Ingo Molnar <mingo@elte.hu>
          Sat, 31 Jan 2009 17:26:07 +0000 (18:26 +0100)
[
  mingo@elte.hu: these fixes are a subset of changes cherry-picked from:

     git://git.kernel.org:/pub/scm/linux/kernel/git/jejb/voyager-2.6.git

  They fix various problems that recent x86 changes caused in the Voyager
  subarchitecture: APIC changes, cpumask changes and certain cleanups all
  broke assumptions made by the subarch code.

  Most of these changes are obsolete as the subarch code has been removed
  from the x86 development tree - but we merge them upstream to make Voyager
  build and boot.
]

Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
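
A minimal sketch, not part of this commit, of the cpumask API shift that the
voyager_smp.c diff below adapts to: IRQ affinity callbacks used to take a
cpumask_t by value and now take a const struct cpumask *.  The
example_set_affinity_old/new names are made up for illustration; only the
types and the cpus_addr() accessor are the kernel's own.

    #include <linux/cpumask.h>

    /* old style: the whole bitmap is copied by value, which gets costly
     * and awkward as NR_CPUS grows */
    static void example_set_affinity_old(unsigned int irq, cpumask_t mask)
    {
            unsigned long low_bits = cpus_addr(mask)[0];

            /* ... program the interrupt controller from low_bits ... */
    }

    /* new style: the mask is passed by reference, which is the signature
     * set_vic_irq_affinity() switches to below */
    static void example_set_affinity_new(unsigned int irq,
                                         const struct cpumask *mask)
    {
            unsigned long low_bits = cpus_addr(*mask)[0];

            /* ... program the interrupt controller from low_bits ... */
    }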
arch/x86/kernel/irqinit_32.c
arch/x86/mach-default/setup.c
arch/x86/mach-voyager/setup.c
arch/x86/mach-voyager/voyager_smp.c
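
For reference, a sketch (not part of this commit) of the pattern behind the
first two diffs: registering the IRQ2 cascade moves out of the generic
native_init_IRQ() and into the per-subarchitecture intr_init_hook(), since
each subarch links exactly one implementation of that hook and Voyager's
version already registers the cascade itself.  The default_/voyager_
prefixes below are illustrative; in the tree both functions are simply
intr_init_hook() in their respective mach-* directories.

    #include <linux/init.h>
    #include <linux/interrupt.h>
    #include <linux/irq.h>
    #include <asm/acpi.h>

    /* each subarch really has its own static irq2 irqaction; one copy is
     * shown here so the sketch stays self-contained */
    static struct irqaction irq2 = {
            .handler = no_action,
            .mask    = CPU_MASK_NONE,
            .name    = "cascade",
    };

    static void __init default_intr_init_hook(void)     /* mach-default */
    {
            /* register the i8259 cascade only when the IO-APIC is not
             * already handling it */
            if (!acpi_ioapic)
                    setup_irq(2, &irq2);
    }

    static void __init voyager_intr_init_hook(void)     /* mach-voyager */
    {
            /* the VIC setup registers the cascade unconditionally */
            setup_irq(2, &irq2);
    }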

diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 1507ad4e674d253f93b60e8df9188bd17354a987..10a09c2f1828c102893d71a5add47ae320e2df55 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -78,15 +78,6 @@ void __init init_ISA_irqs(void)
        }
 }
 
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
-       .handler = no_action,
-       .mask = CPU_MASK_NONE,
-       .name = "cascade",
-};
-
 DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
        [0 ... IRQ0_VECTOR - 1] = -1,
        [IRQ0_VECTOR] = 0,
@@ -178,9 +169,6 @@ void __init native_init_IRQ(void)
        alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
 #endif
 
-       if (!acpi_ioapic)
-               setup_irq(2, &irq2);
-
        /* setup after call gates are initialised (usually add in
         * the architecture specific gates)
         */
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c
index df167f2656228c83eb58ce2ebd4600074d766e77..a265a7c631901d111a9b907fa105754871a45e5b 100644
--- a/arch/x86/mach-default/setup.c
+++ b/arch/x86/mach-default/setup.c
@@ -38,6 +38,15 @@ void __init pre_intr_init_hook(void)
        init_ISA_irqs();
 }
 
+/*
+ * IRQ2 is cascade interrupt to second interrupt controller
+ */
+static struct irqaction irq2 = {
+       .handler = no_action,
+       .mask = CPU_MASK_NONE,
+       .name = "cascade",
+};
+
 /**
  * intr_init_hook - post gate setup interrupt initialisation
  *
@@ -53,6 +62,9 @@ void __init intr_init_hook(void)
                if (x86_quirks->arch_intr_init())
                        return;
        }
+       if (!acpi_ioapic)
+               setup_irq(2, &irq2);
+
 }
 
 /**
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c
index a580b9562e76cc078ab3fd0b2b90c722d10fdf01..d914a7996a6684758f9ba1be29b6e012c88c21eb 100644
--- a/arch/x86/mach-voyager/setup.c
+++ b/arch/x86/mach-voyager/setup.c
@@ -33,13 +33,23 @@ void __init intr_init_hook(void)
        setup_irq(2, &irq2);
 }
 
-void __init pre_setup_arch_hook(void)
+static void voyager_disable_tsc(void)
 {
        /* Voyagers run their CPUs from independent clocks, so disable
         * the TSC code because we can't sync them */
        setup_clear_cpu_cap(X86_FEATURE_TSC);
 }
 
+void __init pre_setup_arch_hook(void)
+{
+       voyager_disable_tsc();
+}
+
+void __init pre_time_init_hook(void)
+{
+       voyager_disable_tsc();
+}
+
 void __init trap_init_hook(void)
 {
 }
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 9840b7ec749a00dfc7a906e8a9045b76d545650d..7ffcdeec4631fa7b1adf54ec2bbbd71ee98402ba 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -81,7 +81,7 @@ static void enable_local_vic_irq(unsigned int irq);
 static void disable_local_vic_irq(unsigned int irq);
 static void before_handle_vic_irq(unsigned int irq);
 static void after_handle_vic_irq(unsigned int irq);
-static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask);
+static void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask);
 static void ack_vic_irq(unsigned int irq);
 static void vic_enable_cpi(void);
 static void do_boot_cpu(__u8 cpuid);
@@ -211,8 +211,6 @@ static __u32 cpu_booted_map;
 static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
 /* This is for the new dynamic CPU boot code */
-cpumask_t cpu_callin_map = CPU_MASK_NONE;
-cpumask_t cpu_callout_map = CPU_MASK_NONE;
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
@@ -378,7 +376,7 @@ void __init find_smp_config(void)
        cpus_addr(phys_cpu_present_map)[0] |=
            voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
                                       3) << 24;
-       cpu_possible_map = phys_cpu_present_map;
+       init_cpu_possible(&phys_cpu_present_map);
        printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
               cpus_addr(phys_cpu_present_map)[0]);
        /* Here we set up the VIC to enable SMP */
@@ -1599,16 +1597,16 @@ static void after_handle_vic_irq(unsigned int irq)
  * change the mask and then do an interrupt enable CPI to re-enable on
  * the selected processors */
 
-void set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
+void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
        /* Only extended processors handle interrupts */
        unsigned long real_mask;
        unsigned long irq_mask = 1 << irq;
        int cpu;
 
-       real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
+       real_mask = cpus_addr(*mask)[0] & voyager_extended_vic_processors;
 
-       if (cpus_addr(mask)[0] == 0)
+       if (cpus_addr(*mask)[0] == 0)
                /* can't have no CPUs to accept the interrupt -- extremely
                 * bad things will happen */
                return;
@@ -1750,10 +1748,11 @@ static void __cpuinit voyager_smp_prepare_boot_cpu(void)
        init_gdt(smp_processor_id());
        switch_to_new_gdt();
 
-       cpu_set(smp_processor_id(), cpu_online_map);
-       cpu_set(smp_processor_id(), cpu_callout_map);
-       cpu_set(smp_processor_id(), cpu_possible_map);
-       cpu_set(smp_processor_id(), cpu_present_map);
+       cpu_online_map = cpumask_of_cpu(smp_processor_id());
+       cpu_callout_map = cpumask_of_cpu(smp_processor_id());
+       cpu_callin_map = CPU_MASK_NONE;
+       cpu_present_map = cpumask_of_cpu(smp_processor_id());
+
 }
 
 static int __cpuinit voyager_cpu_up(unsigned int cpu)
@@ -1783,9 +1782,9 @@ void __init smp_setup_processor_id(void)
        x86_write_percpu(cpu_number, hard_smp_processor_id());
 }
 
-static void voyager_send_call_func(cpumask_t callmask)
+static void voyager_send_call_func(const struct cpumask *callmask)
 {
-       __u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
+       __u32 mask = cpus_addr(*callmask)[0] & ~(1 << smp_processor_id());
        send_CPI(mask, VIC_CALL_FUNCTION_CPI);
 }