bbs.cooldavid.org Git - net-next-2.6.git/blobdiff - arch/i386/kernel/vmi.c
[PATCH] vmi: fix highpte
[net-next-2.6.git] / arch / i386 / kernel / vmi.c
index bb5a7abf949cfd89c239ac7ec68ff4219969ac74..bd1037bd124bb24565d27c360bd09fe5c2c748ff 100644 (file)
@@ -54,6 +54,7 @@ static int disable_pse;
 static int disable_sep;
 static int disable_tsc;
 static int disable_mtrr;
+static int disable_noidle;
 
 /* Cached VMI operations */
 struct {
@@ -255,7 +256,6 @@ static void vmi_nop(void)
 }
 
 /* For NO_IDLE_HZ, we stop the clock when halting the kernel */
-#ifdef CONFIG_NO_IDLE_HZ
 static fastcall void vmi_safe_halt(void)
 {
        int idle = vmi_stop_hz_timer();
@@ -266,7 +266,6 @@ static fastcall void vmi_safe_halt(void)
                local_irq_enable();
        }
 }
-#endif
 
 #ifdef CONFIG_DEBUG_PAGE_TYPE
 
@@ -371,6 +370,24 @@ static void vmi_check_page_type(u32 pfn, int type)
 #define vmi_check_page_type(p,t) do { } while (0)
 #endif
 
+static void vmi_map_pt_hook(int type, pte_t *va, u32 pfn)
+{
+       /*
+        * Internally, the VMI ROM must map virtual addresses to physical
+        * addresses for processing MMU updates.  By the time MMU updates
+        * are issued, this information is typically already lost.
+        * Fortunately, the VMI provides a cache of mapping slots for active
+        * page tables.
+        *
+        * We use slot zero for the linear mapping of physical memory, and
+        * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1.
+        *
+        *  args:                 SLOT                 VA    COUNT PFN
+        */
+       BUG_ON(type != KM_PTE0 && type != KM_PTE1);
+       vmi_ops.set_linear_mapping((type - KM_PTE0)+1, (u32)va, 1, pfn);
+}
+
 static void vmi_allocate_pt(u32 pfn)
 {
        vmi_set_page_type(pfn, VMI_PAGE_L1);
@@ -742,12 +759,7 @@ static inline int __init activate_vmi(void)
                     (char *)paravirt_ops.save_fl);
        patch_offset(&irq_save_disable_callout[IRQ_PATCH_DISABLE],
                     (char *)paravirt_ops.irq_disable);
-#ifndef CONFIG_NO_IDLE_HZ
-       para_fill(safe_halt, Halt);
-#else
-       vmi_ops.halt = vmi_get_function(VMI_CALL_Halt);
-       paravirt_ops.safe_halt = vmi_safe_halt;
-#endif
+
        para_fill(wbinvd, WBINVD);
        /* paravirt_ops.read_msr = vmi_rdmsr */
        /* paravirt_ops.write_msr = vmi_wrmsr */
@@ -819,6 +831,7 @@ static inline int __init activate_vmi(void)
        vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
        vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
 
+       paravirt_ops.map_pt_hook = vmi_map_pt_hook;
        paravirt_ops.alloc_pt = vmi_allocate_pt;
        paravirt_ops.alloc_pd = vmi_allocate_pd;
        paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone;
@@ -879,7 +892,14 @@ static inline int __init activate_vmi(void)
                paravirt_ops.setup_boot_clock = vmi_timer_setup_boot_alarm;
                paravirt_ops.setup_secondary_clock = vmi_timer_setup_secondary_alarm;
 #endif
-               custom_sched_clock = vmi_sched_clock;
+               paravirt_ops.get_scheduled_cycles = vmi_get_sched_cycles;
+               paravirt_ops.get_cpu_khz = vmi_cpu_khz;
+       }
+       if (!disable_noidle)
+               para_fill(safe_halt, Halt);
+       else {
+               vmi_ops.halt = vmi_get_function(VMI_CALL_Halt);
+               paravirt_ops.safe_halt = vmi_safe_halt;
        }
 
        /*
@@ -914,9 +934,11 @@ void __init vmi_init(void)
 
        local_irq_save(flags);
        activate_vmi();
-#ifdef CONFIG_SMP
+
+#ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
 #endif
+
        local_irq_restore(flags & X86_EFLAGS_IF);
 }
 
@@ -942,7 +964,8 @@ static int __init parse_vmi(char *arg)
        } else if (!strcmp(arg, "disable_mtrr")) {
                clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability);
                disable_mtrr = 1;
-       }
+       } else if (!strcmp(arg, "disable_noidle"))
+               disable_noidle = 1;
        return 0;
 }