/*P:010
 * A hypervisor allows multiple Operating Systems to run on a single machine.
 * To quote David Wheeler: "Any problem in computer science can be solved with
 * another layer of indirection."
 *
 * We keep things simple in two ways. First, we start with a normal Linux
 * kernel and insert a module (lg.ko) which allows us to run other Linux
 * kernels the same way we'd run processes. We call the first kernel the Host,
 * and the others the Guests. The program which sets up and configures Guests
 * (such as the example in Documentation/lguest/lguest.c) is called the
 * Launcher.
 *
 * Secondly, we only run specially modified Guests, not normal kernels: setting
 * CONFIG_LGUEST_GUEST to "y" compiles this file into the kernel so it knows
 * how to be a Guest at boot time. This means that you can use the same kernel
 * you boot normally (ie. as a Host) as a Guest.
 *
 * These Guests know that they cannot do privileged operations, such as disable
 * interrupts, and that they have to ask the Host to do such things explicitly.
 * This file consists of all the replacements for such low-level native
 * hardware operations: these special Guest versions call the Host.
 *
 * So how does the kernel know it's a Guest? We'll see that later, but let's
 * just say that we end up here where we replace the native functions in the
 * various "paravirt" structures with our Guest versions, then boot like normal.
:*/

/*
 * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/start_kernel.h>
#include <linux/string.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/lguest.h>
#include <linux/lguest_launcher.h>
#include <linux/virtio_console.h>
#include <linux/pm.h>
#include <asm/apic.h>
#include <asm/lguest.h>
#include <asm/paravirt.h>
#include <asm/param.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/e820.h>
#include <asm/mce.h>
#include <asm/io.h>
#include <asm/i387.h>
#include <asm/stackprotector.h>
#include <asm/reboot.h>		/* for struct machine_ops */

/*G:010 Welcome to the Guest!
 *
 * The Guest in our tale is a simple creature: identical to the Host but
 * behaving in simplified but equivalent ways. In particular, the Guest is the
 * same kernel as the Host (or at least, built from the same source code).
:*/

struct lguest_data lguest_data = {
	.hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF },
	.noirq_start = (u32)lguest_noirq_start,
	.noirq_end = (u32)lguest_noirq_end,
	.kernel_address = PAGE_OFFSET,
	.blocked_interrupts = { 1 }, /* Block timer interrupts */
	.syscall_vec = SYSCALL_VECTOR,
};

/*G:037
 * async_hcall() is pretty simple: I'm quite proud of it really. We have a
 * ring buffer of stored hypercalls which the Host will run through next time
 * we do a normal hypercall. Each entry in the ring has 5 slots for the
 * hypercall arguments, and a "hcall_status" word which is 0 if the call is
 * ready to go, and 255 once the Host has finished with it.
 *
 * If we come around to a slot which hasn't been finished, then the table is
 * full and we just make the hypercall directly. This has the nice side
 * effect of causing the Host to run all the stored calls in the ring buffer
 * which empties it for next time!
 */
static void async_hcall(unsigned long call, unsigned long arg1,
			unsigned long arg2, unsigned long arg3,
			unsigned long arg4)
{
	/* Note: This code assumes we're uniprocessor. */
	static unsigned int next_call;
	unsigned long flags;

	/*
	 * Disable interrupts if not already disabled: we don't want an
	 * interrupt handler making a hypercall while we're already doing
	 * one!
	 */
	local_irq_save(flags);
	if (lguest_data.hcall_status[next_call] != 0xFF) {
		/* Table full, so do normal hcall which will flush table. */
		hcall(call, arg1, arg2, arg3, arg4);
	} else {
		lguest_data.hcalls[next_call].arg0 = call;
		lguest_data.hcalls[next_call].arg1 = arg1;
		lguest_data.hcalls[next_call].arg2 = arg2;
		lguest_data.hcalls[next_call].arg3 = arg3;
		lguest_data.hcalls[next_call].arg4 = arg4;
		/* Arguments must all be written before we mark it to go */
		wmb();
		lguest_data.hcall_status[next_call] = 0;
		if (++next_call == LHCALL_RING_SIZE)
			next_call = 0;
	}
	local_irq_restore(flags);
}

/*G:035
 * Notice the lazy_hcall() above, rather than hcall(). This is our first real
 * optimization trick!
 *
 * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
 * them as a batch when lazy_mode is eventually turned off. Because hypercalls
 * are reasonably expensive, batching them up makes sense. For example, a
 * large munmap might update dozens of page table entries: that code calls
 * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
 * lguest_leave_lazy_mode().
 *
 * So, when we're in lazy mode, we call async_hcall() to store the call for
 * future processing:
 */
static void lazy_hcall1(unsigned long call, unsigned long arg1)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		hcall(call, arg1, 0, 0, 0);
	else
		async_hcall(call, arg1, 0, 0, 0);
}

/* You can imagine what lazy_hcall2, 3 and 4 look like. :*/
static void lazy_hcall2(unsigned long call,
			unsigned long arg1,
			unsigned long arg2)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		hcall(call, arg1, arg2, 0, 0);
	else
		async_hcall(call, arg1, arg2, 0, 0);
}

static void lazy_hcall3(unsigned long call,
			unsigned long arg1,
			unsigned long arg2,
			unsigned long arg3)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		hcall(call, arg1, arg2, arg3, 0);
	else
		async_hcall(call, arg1, arg2, arg3, 0);
}

#ifdef CONFIG_X86_PAE
static void lazy_hcall4(unsigned long call,
			unsigned long arg1,
			unsigned long arg2,
			unsigned long arg3,
			unsigned long arg4)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		hcall(call, arg1, arg2, arg3, arg4);
	else
		async_hcall(call, arg1, arg2, arg3, arg4);
}
#endif

/*G:036
 * When lazy mode is turned off reset the per-cpu lazy mode variable and then
 * issue the do-nothing hypercall to flush any stored calls.
:*/
static void lguest_leave_lazy_mmu_mode(void)
{
	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
	paravirt_leave_lazy_mmu();
}

static void lguest_end_context_switch(struct task_struct *next)
{
	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
	paravirt_end_context_switch(next);
}

/*G:032
 * After that diversion we return to our first native-instruction
 * replacements: four functions for interrupt control.
 *
 * The simplest way of implementing these would be to have "turn interrupts
 * off" and "turn interrupts on" hypercalls. Unfortunately, this is too slow:
 * these are by far the most commonly called functions of those we override.
 *
 * So instead we keep an "irq_enabled" field inside our "struct lguest_data",
 * which the Guest can update with a single instruction. The Host knows to
 * check there before it tries to deliver an interrupt.
 */

/*
 * save_flags() is expected to return the processor state (ie. "flags"). The
 * flags word contains all kinds of stuff, but in practice Linux only cares
 * about the interrupt flag. Our "save_flags()" just returns that.
 */
static unsigned long save_fl(void)
{
	return lguest_data.irq_enabled;
}

/* Interrupts go off... */
static void irq_disable(void)
{
	lguest_data.irq_enabled = 0;
}

/*
 * Let's pause a moment. Remember how I said these are called so often?
 * Jeremy Fitzhardinge optimized them so hard early in 2009 that he had to
 * break some rules. In particular, these functions are assumed to save their
 * own registers if they need to: normal C functions assume they can trash the
 * eax register. To use normal C functions, we use
 * PV_CALLEE_SAVE_REGS_THUNK(), which pushes %eax onto the stack, calls the
 * C function, then restores it.
 */
PV_CALLEE_SAVE_REGS_THUNK(save_fl);
PV_CALLEE_SAVE_REGS_THUNK(irq_disable);
/*:*/

/* These are in i386_head.S */
extern void lg_irq_enable(void);
extern void lg_restore_fl(unsigned long flags);

/*M:003
 * We could be more efficient in our checking of outstanding interrupts, rather
 * than using a branch. One way would be to put the "irq_enabled" field in a
 * page by itself, and have the Host write-protect it when an interrupt comes
 * in when irqs are disabled. There will then be a page fault as soon as
 * interrupts are re-enabled.
 *
 * A better method is to implement soft interrupt disable generally for x86:
 * instead of disabling interrupts, we set a flag. If an interrupt does come
 * in, we then disable them for real. This is uncommon, so we could simply use
 * a hypercall for interrupt control and not worry about efficiency.
:*/

/*G:034
 * The Interrupt Descriptor Table (IDT).
 *
 * The IDT tells the processor what to do when an interrupt comes in. Each
 * entry in the table is a 64-bit descriptor: this holds the privilege level,
 * address of the handler, and... well, who cares? The Guest just asks the
 * Host to make the change anyway, because the Host controls the real IDT.
 */
static void lguest_write_idt_entry(gate_desc *dt,
				   int entrynum, const gate_desc *g)
{
	/*
	 * The gate_desc structure is 8 bytes long: we hand it to the Host in
	 * two 32-bit chunks. The whole 32-bit kernel used to hand descriptors
	 * around like this; typesafety wasn't a big concern in Linux's early
	 * years.
	 */
	u32 *desc = (u32 *)g;
	/* Keep the local copy up to date. */
	native_write_idt_entry(dt, entrynum, g);
	/* Tell Host about this new entry. */
	hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1], 0);
}

/*
 * Changing to a different IDT is very rare: we keep the IDT up-to-date every
 * time it is written, so we can simply loop through all entries and tell the
 * Host about them.
 */
static void lguest_load_idt(const struct desc_ptr *desc)
{
	unsigned int i;
	struct desc_struct *idt = (void *)desc->address;

	for (i = 0; i < (desc->size+1)/8; i++)
		hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b, 0);
}

/*
 * The Global Descriptor Table.
 *
 * The Intel architecture defines another table, called the Global Descriptor
 * Table (GDT). You tell the CPU where it is (and its size) using the "lgdt"
 * instruction, and then several other instructions refer to entries in the
 * table. There are three entries which the Switcher needs, so the Host simply
 * controls the entire thing and the Guest asks it to make changes using the
 * LOAD_GDT hypercall.
 *
 * This is exactly like the IDT code.
 */
static void lguest_load_gdt(const struct desc_ptr *desc)
{
	unsigned int i;
	struct desc_struct *gdt = (void *)desc->address;

	for (i = 0; i < (desc->size+1)/8; i++)
		hcall(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b, 0);
}

/*
 * For a single GDT entry which changes, we do the lazy thing: alter our GDT,
 * then tell the Host to reload the entire thing. This operation is so rare
 * that this naive implementation is reasonable.
 */
static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
				   const void *desc, int type)
{
	native_write_gdt_entry(dt, entrynum, desc, type);
	/* Tell Host about this new entry. */
	hcall(LHCALL_LOAD_GDT_ENTRY, entrynum,
	      dt[entrynum].a, dt[entrynum].b, 0);
}

/*
 * OK, I lied. There are three "thread local storage" GDT entries which change
 * on every context switch (these three entries are how glibc implements
 * __thread variables). So we have a hypercall specifically for this case.
 */
static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/*
	 * There's one problem which normal hardware doesn't have: the Host
	 * can't handle us removing entries we're currently using. So we clear
	 * the GS register here: if it's needed it'll be reloaded anyway.
	 */
	lazy_load_gs(0);
	lazy_hcall2(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu);
}

/*G:038
 * That's enough excitement for now, back to ploughing through each of the
 * different pv_ops structures (we're about 1/3 of the way through).
 *
 * This is the Local Descriptor Table, another weird Intel thingy. Linux only
 * uses this for some strange applications like Wine. We don't do anything
 * here, so they'll get an informative and friendly Segmentation Fault.
 */
static void lguest_set_ldt(const void *addr, unsigned entries)
{
}

/*
 * This loads a GDT entry into the "Task Register": that entry points to a
 * structure called the Task State Segment. Some comments scattered through the
 * kernel code indicate that this was used for task switching in ages past,
 * along with blood sacrifice and astrology.
 *
 * Now there's nothing interesting in here that we don't get told elsewhere.
 * But the native version uses the "ltr" instruction, which makes the Host
 * complain to the Guest about a Segmentation Fault and it'll oops. So we
 * override the native version with a do-nothing version.
 */
static void lguest_load_tr_desc(void)
{
}

/*
 * The "cpuid" instruction is a way of querying both the CPU identity
 * (manufacturer, model, etc) and its features. It was introduced before the
 * Pentium in 1993 and keeps getting extended by Intel, AMD and others.
 * As you might imagine, after a decade and a half of this treatment, it is now
 * a giant ball of hair. Its entry in the current Intel manual runs to 28 pages.
 *
 * This instruction even has its own Wikipedia entry. The Wikipedia entry
 * has been translated into 5 languages. I am not making this up!
 *
 * We could get funky here and identify ourselves as "GenuineLguest", but
 * instead we just use the real "cpuid" instruction. Then I pretty much turned
 * off feature bits until the Guest booted. (Don't say that: you'll damage
 * lguest sales!) Shut up, inner voice! (Hey, just pointing out that this is
 * hardly future proof.) No one's listening! They don't like you anyway,
 * parenthetic weirdo!
 *
 * Replacing the cpuid so we can turn features off is great for the kernel, but
 * anyone (including userspace) can just use the raw "cpuid" instruction and
 * the Host won't even notice since it isn't privileged. So we try not to get
 * too worked up about it.
 */
static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
			 unsigned int *cx, unsigned int *dx)
{
	int function = *ax;

	native_cpuid(ax, bx, cx, dx);
	switch (function) {
	/*
	 * CPUID 0 gives the highest legal CPUID number (and the ID string).
	 * We futureproof our code a little by sticking to known CPUID values.
	 */
	case 0:
		if (*ax > 5)
			*ax = 5;
		break;

	/*
	 * CPUID 1 is a basic feature request.
	 *
	 * CX: we only allow kernel to see SSE3, CMPXCHG16B and SSSE3
	 * DX: SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU and PAE.
	 */
	case 1:
		*cx &= 0x00002201;
		*dx &= 0x07808151;
		/*
		 * The Host can do a nice optimization if it knows that the
		 * kernel mappings (addresses above 0xC0000000 or whatever
		 * PAGE_OFFSET is set to) haven't changed. But Linux calls
		 * flush_tlb_user() for both user and kernel mappings unless
		 * the Page Global Enable (PGE) feature bit is set.
		 */
		*dx |= 0x00002000;
		/*
		 * We also lie, and say we're family id 5. 6 or greater
		 * leads to a rdmsr in early_init_intel which we can't handle.
		 * Family ID is returned as bits 8-12 in ax.
		 */
		*ax &= 0xFFFFF0FF;
		*ax |= 0x00000500;
		break;
	/*
	 * 0x80000000 returns the highest Extended Function, so we futureproof
	 * like we do above by limiting it to known fields.
	 */
	case 0x80000000:
		if (*ax > 0x80000008)
			*ax = 0x80000008;
		break;

	/*
	 * PAE systems can mark pages as non-executable. Linux calls this the
	 * NX bit. Intel calls it XD (eXecute Disable), AMD EVP (Enhanced
	 * Virus Protection). We just turn it off here, since we don't
	 * support it.
	 */
	case 0x80000001:
		*dx &= ~(1 << 20);
		break;
	}
}
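
/*
 * (An illustrative aside, not part of the original file.) The family-id
 * fix-up above is just bit surgery on the version word in *ax: masking with
 * 0xFFFFF0FF clears the family bits and OR-ing in 0x00000500 writes family 5.
 * For example, a family-6 CPU reporting *ax = 0x000006F2 would be rewritten
 * to 0x000005F2, so a later "(*ax >> 8) & 0xf" reads back 5.
 */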

/*
 * Intel has four control registers, imaginatively named cr0, cr2, cr3 and cr4.
 * I assume there's a cr1, but it hasn't bothered us yet, so we'll not bother
 * it. The Host needs to know when the Guest wants to change them, so we have
 * a whole series of functions like read_cr0() and write_cr0().
 *
 * We start with cr0. cr0 allows you to turn on and off all kinds of basic
 * features, but Linux only really cares about one: the horrifically-named Task
 * Switched (TS) bit at bit 3 (ie. 8).
 *
 * What does the TS bit do? Well, it causes the CPU to trap (interrupt 7) if
 * the floating point unit is used. Which allows us to restore FPU state
 * lazily after a task switch, and Linux uses that gratefully, but wouldn't a
 * name like "FPUTRAP bit" be a little less cryptic?
 *
 * We store cr0 locally because the Host never changes it. The Guest sometimes
 * wants to read it and we'd prefer not to bother the Host unnecessarily.
 */
static unsigned long current_cr0;
static void lguest_write_cr0(unsigned long val)
{
	lazy_hcall1(LHCALL_TS, val & X86_CR0_TS);
	current_cr0 = val;
}

static unsigned long lguest_read_cr0(void)
{
	return current_cr0;
}

/*
 * Intel provided a special instruction to clear the TS bit for people too cool
 * to use write_cr0() to do it. This "clts" instruction is faster, because all
 * the vowels have been optimized out.
 */
static void lguest_clts(void)
{
	lazy_hcall1(LHCALL_TS, 0);
	current_cr0 &= ~X86_CR0_TS;
}

/*
 * cr2 is the virtual address of the last page fault, which the Guest only ever
 * reads. The Host kindly writes this into our "struct lguest_data", so we
 * just read it out of there.
 */
static unsigned long lguest_read_cr2(void)
{
	return lguest_data.cr2;
}

/* See lguest_set_pte() below. */
static bool cr3_changed = false;

/*
 * cr3 is the current toplevel pagetable page: the principle is the same as
 * cr0. Keep a local copy, and tell the Host when it changes. The only
 * difference is that our local copy is in lguest_data because the Host needs
 * to set it upon our initial hypercall.
 */
static void lguest_write_cr3(unsigned long cr3)
{
	lguest_data.pgdir = cr3;
	lazy_hcall1(LHCALL_NEW_PGTABLE, cr3);
	cr3_changed = true;
}

static unsigned long lguest_read_cr3(void)
{
	return lguest_data.pgdir;
}

/* cr4 is used to enable and disable PGE, but we don't care. */
static unsigned long lguest_read_cr4(void)
{
	return 0;
}

static void lguest_write_cr4(unsigned long val)
{
}

/*
 * Page Table Handling.
 *
 * Now would be a good time to take a rest and grab a coffee or similarly
 * relaxing stimulant. The easy parts are behind us, and the trek gradually
 * winds uphill from here.
 *
 * Quick refresher: memory is divided into "pages" of 4096 bytes each. The CPU
 * maps virtual addresses to physical addresses using "page tables". We could
 * use one huge index of 1 million entries: each address is 4 bytes, so that's
 * 1024 pages just to hold the page tables. But since most virtual addresses
 * are unused, we use a two level index which saves space. The cr3 register
 * contains the physical address of the top level "page directory" page, which
 * contains physical addresses of up to 1024 second-level pages. Each of these
 * second level pages contains up to 1024 physical addresses of actual pages,
 * or Page Table Entries (PTEs).
 *
 * Here's a diagram, where arrows indicate physical addresses:
 *
 * cr3 ---> +---------+
 *          |    ---------->+---------+
 *          |         |     | PADDR1  |
 *        Mid-level   |     | PADDR2  |
 *        (PMD) page  |     |         |
 *          |         |  Lower-level  |
 *          |         |  (PTE) page   |
 *          |         |     |         |
 *            ....               ....
 *
 * So to convert a virtual address to a physical address, we look up the top
 * level, which points us to the second level, which gives us the physical
 * address of that page. If the top level entry was not present, or the second
 * level entry was not present, then the virtual address is invalid (we
 * say "the page was not mapped").
 *
 * Put another way, a 32-bit virtual address is divided up like so:
 *
 *  1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 * |<---- 10 bits ---->|<---- 10 bits ---->|<------ 12 bits ------>|
 *    Index into top      Index into second    Offset within page
 *  page directory page     pagetable page
 *
 * Now, unfortunately, this isn't the whole story: Intel added Physical Address
 * Extension (PAE) to allow 32 bit systems to use 64GB of memory (ie. 36 bits).
 * These are held in 64-bit page table entries, so we can now only fit 512
 * entries in a page, and the neat three-level tree breaks down.
 *
 * The result is a four level page table:
 *
 * cr3 --> [ 4 Upper  ]
 *         [   Level  ]
 *         [  Entries ]
 *         [(PUD Page)]---> +---------+
 *                          |    ---------->+---------+
 *                          |         |     | PADDR1  |
 *                        Mid-level   |     | PADDR2  |
 *                        (PMD) page  |     |         |
 *                          |         |  Lower-level  |
 *                          |         |  (PTE) page   |
 *                          |         |     |         |
 *                            ....               ....
 *
 *
 * And the virtual address is decoded as:
 *
 *  1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 * |<-2->|<--- 9 bits ---->|<---- 9 bits --->|<------ 12 bits ------>|
 * Index into   Index into mid   Index into lower   Offset within page
 * top entries  directory page    pagetable page
 *
 * It's too hard to switch between these two formats at runtime, so Linux only
 * supports one or the other depending on whether CONFIG_X86_PAE is set. Many
 * distributions turn it on, and not just for people with silly amounts of
 * memory: the larger PTE entries allow room for the NX bit, which lets the
 * kernel disable execution of pages and increase security.
 *
 * This was a problem for lguest, which couldn't run on these distributions;
 * then Matias Zabaljauregui figured it all out and implemented it, and only a
 * handful of puppies were crushed in the process!
 *
 * Back to our point: the kernel spends a lot of time changing both the
 * top-level page directory and lower-level pagetable pages. The Guest doesn't
 * know physical addresses, so while it maintains these page tables exactly
 * like normal, it also needs to keep the Host informed whenever it makes a
 * change: the Host will create the real page tables based on the Guests'.
 */
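
/*
 * (An illustrative sketch, not part of the original file.) Splitting a 32-bit
 * virtual address by hand, for the non-PAE layout drawn above, is just shifts
 * and masks:
 *
 *	unsigned long vaddr = 0xC0001234;
 *	unsigned int pgd_idx = vaddr >> 22;            // top 10 bits  = 0x300
 *	unsigned int pte_idx = (vaddr >> 12) & 0x3FF;  // next 10 bits = 0x001
 *	unsigned int offset  = vaddr & 0xFFF;          // low 12 bits  = 0x234
 *
 * The CPU (and the Host, when it builds the real page tables) walks the
 * tables with exactly these indices.
 */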

/*
 * The Guest calls this after it has set a second-level entry (pte), ie. to map
 * a page into a process' address space. We tell the Host the toplevel and
 * address this corresponds to. The Guest uses one pagetable per process, so
 * we need to tell the Host which one we're changing (mm->pgd).
 */
static void lguest_pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
#ifdef CONFIG_X86_PAE
	/* PAE needs to hand a 64 bit page table entry, so it uses two args. */
	lazy_hcall4(LHCALL_SET_PTE, __pa(mm->pgd), addr,
		    ptep->pte_low, ptep->pte_high);
#else
	lazy_hcall3(LHCALL_SET_PTE, __pa(mm->pgd), addr, ptep->pte_low);
#endif
}

/* This is the "set and update" combo-meal-deal version. */
static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	native_set_pte(ptep, pteval);
	lguest_pte_update(mm, addr, ptep);
}

/*
 * The Guest calls lguest_set_pud to set a top-level entry and lguest_set_pmd
 * to set a middle-level entry when PAE is activated.
 *
 * Again, we set the entry then tell the Host which page we changed,
 * and the index of the entry we changed.
 */
#ifdef CONFIG_X86_PAE
static void lguest_set_pud(pud_t *pudp, pud_t pudval)
{
	native_set_pud(pudp, pudval);

	/* 32-byte aligned pdpt address and the index. */
	lazy_hcall2(LHCALL_SET_PGD, __pa(pudp) & 0xFFFFFFE0,
		    (__pa(pudp) & 0x1F) / sizeof(pud_t));
}

static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	native_set_pmd(pmdp, pmdval);
	lazy_hcall2(LHCALL_SET_PMD, __pa(pmdp) & PAGE_MASK,
		    (__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t));
}
#else

/* The Guest calls lguest_set_pmd to set a top-level entry when !PAE. */
static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	native_set_pmd(pmdp, pmdval);
	lazy_hcall2(LHCALL_SET_PGD, __pa(pmdp) & PAGE_MASK,
		    (__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t));
}
#endif

/*
 * There are a couple of legacy places where the kernel sets a PTE, but we
 * don't know the top level any more. This is useless for us, since we don't
 * know which pagetable is changing or what address, so we just tell the Host
 * to forget all of them. Fortunately, this is very rare.
 *
 * ... except in early boot when the kernel sets up the initial pagetables,
 * which makes booting astonishingly slow: 1.83 seconds! So we don't even tell
 * the Host anything changed until we've done the first page table switch,
 * which brings boot back to 0.25 seconds.
 */
static void lguest_set_pte(pte_t *ptep, pte_t pteval)
{
	native_set_pte(ptep, pteval);
	if (cr3_changed)
		lazy_hcall1(LHCALL_FLUSH_TLB, 1);
}

#ifdef CONFIG_X86_PAE
/*
 * With 64-bit PTE values, we need to be careful setting them: if we set 32
 * bits at a time, the hardware could see a weird half-set entry. These
 * versions ensure we update all 64 bits at once.
 */
static void lguest_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte_atomic(ptep, pte);
	if (cr3_changed)
		lazy_hcall1(LHCALL_FLUSH_TLB, 1);
}

static void lguest_pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	native_pte_clear(mm, addr, ptep);
	lguest_pte_update(mm, addr, ptep);
}

static void lguest_pmd_clear(pmd_t *pmdp)
{
	lguest_set_pmd(pmdp, __pmd(0));
}
#endif

/*
 * Unfortunately for Lguest, the pv_mmu_ops for page tables were based on
 * native page table operations. On native hardware you can set a new page
 * table entry whenever you want, but if you want to remove one you have to do
 * a TLB flush (a TLB is a little cache of page table entries kept by the CPU).
 *
 * So the lguest_set_pte_at() and lguest_set_pmd() functions above are only
 * called when a valid entry is written, not when it's removed (ie. marked not
 * present). Instead, this is where we come when the Guest wants to remove a
 * page table entry: we tell the Host to set that entry to 0 (ie. the present
 * bit is zero).
 */
static void lguest_flush_tlb_single(unsigned long addr)
{
	/* Simply set it to zero: if it was not, it will fault back in. */
	lazy_hcall3(LHCALL_SET_PTE, lguest_data.pgdir, addr, 0);
}

/*
 * This is what happens after the Guest has removed a large number of entries.
 * This tells the Host that any of the page table entries for userspace might
 * have changed, ie. virtual addresses below PAGE_OFFSET.
 */
static void lguest_flush_tlb_user(void)
{
	lazy_hcall1(LHCALL_FLUSH_TLB, 0);
}

/*
 * This is called when the kernel page tables have changed. That's not very
 * common (unless the Guest is using highmem, which makes the Guest extremely
 * slow), so it's worth separating this from the user flushing above.
 */
static void lguest_flush_tlb_kernel(void)
{
	lazy_hcall1(LHCALL_FLUSH_TLB, 1);
}

/*
 * The Unadvanced Programmable Interrupt Controller.
 *
 * This is an attempt to implement the simplest possible interrupt controller.
 * I spent some time looking through routines like set_irq_chip_and_handler,
 * set_irq_chip_and_handler_name, set_irq_chip_data and set_phasers_to_stun and
 * I *think* this is as simple as it gets.
 *
 * We can tell the Host what interrupts we want blocked using the
 * lguest_data.blocked_interrupts bitmap, so disabling (aka "masking") them is
 * as simple as setting a bit. We don't actually "ack" interrupts as such, we
 * just mask and unmask them. I wonder if we should be cleverer?
 */
static void disable_lguest_irq(unsigned int irq)
{
	set_bit(irq, lguest_data.blocked_interrupts);
}

static void enable_lguest_irq(unsigned int irq)
{
	clear_bit(irq, lguest_data.blocked_interrupts);
}

/* This structure describes the lguest IRQ controller. */
static struct irq_chip lguest_irq_controller = {
	.name = "lguest",
	.mask = disable_lguest_irq,
	.mask_ack = disable_lguest_irq,
	.unmask = enable_lguest_irq,
};

/*
 * This sets up the Interrupt Descriptor Table (IDT) entry for each hardware
 * interrupt (except 128, which is used for system calls), and then tells the
 * Linux infrastructure that each interrupt is controlled by our level-based
 * lguest interrupt controller.
 */
static void __init lguest_init_IRQ(void)
{
	unsigned int i;

	for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
		/* Some systems map "vectors" to interrupts weirdly. Not us! */
		__get_cpu_var(vector_irq)[i] = i - FIRST_EXTERNAL_VECTOR;
		if (i != SYSCALL_VECTOR)
			set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
	}

	/*
	 * This call is required to set up for 4k stacks, where we have
	 * separate stacks for hard and soft interrupts.
	 */
	irq_ctx_init(smp_processor_id());
}

/*
 * With CONFIG_SPARSE_IRQ, interrupt descriptors are allocated as-needed, so
 * rather than set them in lguest_init_IRQ we are called here every time an
 * lguest device needs an interrupt.
 *
 * FIXME: irq_to_desc_alloc_node() can fail due to lack of memory, we should
 * pass that up!
 */
void lguest_setup_irq(unsigned int irq)
{
	irq_to_desc_alloc_node(irq, 0);
	set_irq_chip_and_handler_name(irq, &lguest_irq_controller,
				      handle_level_irq, "level");
}

/*
 * Time.
 *
 * It would be far better for everyone if the Guest had its own clock, but
 * until then the Host gives us the time on every interrupt.
 */
static unsigned long lguest_get_wallclock(void)
{
	return lguest_data.time.tv_sec;
}

/*
 * The TSC is an Intel thing called the Time Stamp Counter. The Host tells us
 * what speed it runs at, or 0 if it's unusable as a reliable clock source.
 * This matches what we want here: if we return 0 from this function, the x86
 * TSC clock will give up and not register itself.
 */
static unsigned long lguest_tsc_khz(void)
{
	return lguest_data.tsc_khz;
}

/*
 * If we can't use the TSC, the kernel falls back to our lower-priority
 * "lguest_clock", where we read the time value given to us by the Host.
 */
static cycle_t lguest_clock_read(struct clocksource *cs)
{
	unsigned long sec, nsec;

	/*
	 * Since the time is in two parts (seconds and nanoseconds), we risk
	 * reading it just as it's changing from 99 & 0.999999999 to 100 and 0,
	 * and getting 99 and 0. As Linux tends to come apart under the stress
	 * of time travel, we must be careful:
	 */
	do {
		/* First we read the seconds part. */
		sec = lguest_data.time.tv_sec;
		/*
		 * This read memory barrier tells the compiler and the CPU that
		 * this can't be reordered: we have to complete the above
		 * before going on.
		 */
		rmb();
		/* Now we read the nanoseconds part. */
		nsec = lguest_data.time.tv_nsec;
		/* Make sure we've done that. */
		rmb();
		/* Now if the seconds part has changed, try again. */
	} while (unlikely(lguest_data.time.tv_sec != sec));

	/* Our lguest clock is in real nanoseconds. */
	return sec*1000000000ULL + nsec;
}

/* This is the fallback clocksource: lower priority than the TSC clocksource. */
static struct clocksource lguest_clock = {
	.name = "lguest",
	.rating = 200,
	.read = lguest_clock_read,
	.mask = CLOCKSOURCE_MASK(64),
	.mult = 1 << 22,
	.shift = 22,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
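
/*
 * (An illustrative aside, not part of the original file.) The clocksource
 * core converts raw counter values to nanoseconds roughly as
 * (cycles * mult) >> shift. Because lguest_clock_read() already returns
 * nanoseconds, the mult/shift pair above is chosen to be the identity:
 *
 *	(cycles * (1 << 22)) >> 22 == cycles
 *
 * so one "cycle" of this clocksource is exactly one nanosecond.
 */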

/*
 * We also need a "struct clock_event_device": Linux asks us to set it to go
 * off some time in the future. Actually, James Morris figured all this out, I
 * just applied the patch.
 */
static int lguest_clockevent_set_next_event(unsigned long delta,
					    struct clock_event_device *evt)
{
	/* FIXME: I don't think this can ever happen, but James tells me he had
	 * to put this code in. Maybe we should remove it now. Anyone? */
	if (delta < LG_CLOCK_MIN_DELTA) {
		if (printk_ratelimit())
			printk(KERN_DEBUG "%s: small delta %lu ns\n",
			       __func__, delta);
		return -ETIME;
	}

	/* Please wake us this far in the future. */
	hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0, 0);
	return 0;
}

static void lguest_clockevent_set_mode(enum clock_event_mode mode,
				       struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		/* A 0 argument shuts the clock down. */
		hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0, 0);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		/* This is what we expect. */
		break;
	case CLOCK_EVT_MODE_PERIODIC:
		BUG();
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

/* This describes our primitive timer chip. */
static struct clock_event_device lguest_clockevent = {
	.name = "lguest",
	.features = CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event = lguest_clockevent_set_next_event,
	.set_mode = lguest_clockevent_set_mode,
	.rating = INT_MAX,
	.mult = 1,
	.shift = 0,
	.min_delta_ns = LG_CLOCK_MIN_DELTA,
	.max_delta_ns = LG_CLOCK_MAX_DELTA,
};

/*
 * This is the Guest timer interrupt handler (hardware interrupt 0). We just
 * call the clockevent infrastructure and it does whatever needs doing.
 */
static void lguest_time_irq(unsigned int irq, struct irq_desc *desc)
{
	unsigned long flags;

	/* Don't interrupt us while this is running. */
	local_irq_save(flags);
	lguest_clockevent.event_handler(&lguest_clockevent);
	local_irq_restore(flags);
}

/*
 * At some point in the boot process, we get asked to set up our timing
 * infrastructure. The kernel doesn't expect timer interrupts before this, but
 * we cleverly initialized the "blocked_interrupts" field of "struct
 * lguest_data" so that timer interrupts were blocked until now.
 */
static void lguest_time_init(void)
{
	/* Set up the timer interrupt (0) to go to our simple timer routine */
	set_irq_handler(0, lguest_time_irq);

	clocksource_register(&lguest_clock);

	/* We can't set cpumask in the initializer: damn C limitations! Set it
	 * here and register our timer device. */
	lguest_clockevent.cpumask = cpumask_of(0);
	clockevents_register_device(&lguest_clockevent);

	/* Finally, we unblock the timer interrupt. */
	enable_lguest_irq(0);
}

/*
 * Miscellaneous bits and pieces.
 *
 * Here is an oddball collection of functions which the Guest needs for things
 * to work. They're pretty simple.
 */

/*
 * The Guest needs to tell the Host what stack it expects traps to use. For
 * native hardware, this is part of the Task State Segment mentioned above in
 * lguest_load_tr_desc(), but to help hypervisors there's this special call.
 *
 * We tell the Host the segment we want to use (__KERNEL_DS is the kernel data
 * segment), the privilege level (we're privilege level 1, the Host is 0 and
 * will not tolerate us trying to use that), the stack pointer, and the number
 * of pages in the stack.
 */
static void lguest_load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	lazy_hcall3(LHCALL_SET_STACK, __KERNEL_DS | 0x1, thread->sp0,
		    THREAD_SIZE / PAGE_SIZE);
}

/* Let's just say, I wouldn't do debugging under a Guest. */
static void lguest_set_debugreg(int regno, unsigned long value)
{
	/* FIXME: Implement */
}

/*
 * There are times when the kernel wants to make sure that no memory writes are
 * caught in the cache (that they've all reached real hardware devices). This
 * doesn't matter for the Guest which has virtual hardware.
 *
 * On the Pentium 4 and above, cpuid() indicates that the Cache Line Flush
 * (clflush) instruction is available and the kernel uses that. Otherwise, it
 * uses the older "Write Back and Invalidate Cache" (wbinvd) instruction.
 * Unlike clflush, wbinvd can only be run at privilege level 0. So we can
 * ignore clflush, but replace wbinvd.
 */
static void lguest_wbinvd(void)
{
}

/*
 * If the Guest expects to have an Advanced Programmable Interrupt Controller,
 * we play dumb by ignoring writes and returning 0 for reads. So it's no
 * longer Programmable nor Controlling anything, and I don't think 8 lines of
 * code qualifies for Advanced. It will also never interrupt anything. It
 * does, however, allow us to get through the Linux boot code.
 */
#ifdef CONFIG_X86_LOCAL_APIC
static void lguest_apic_write(u32 reg, u32 v)
{
}

static u32 lguest_apic_read(u32 reg)
{
	return 0;
}

static u64 lguest_apic_icr_read(void)
{
	return 0;
}

static void lguest_apic_icr_write(u32 low, u32 id)
{
	/* Warn to see if there's any stray references */
	WARN_ON(1);
}

static void lguest_apic_wait_icr_idle(void)
{
	return;
}

static u32 lguest_apic_safe_wait_icr_idle(void)
{
	return 0;
}

static void set_lguest_basic_apic_ops(void)
{
	apic->read = lguest_apic_read;
	apic->write = lguest_apic_write;
	apic->icr_read = lguest_apic_icr_read;
	apic->icr_write = lguest_apic_icr_write;
	apic->wait_icr_idle = lguest_apic_wait_icr_idle;
	apic->safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle;
};
#endif

/* STOP! Until an interrupt comes in. */
static void lguest_safe_halt(void)
{
	hcall(LHCALL_HALT, 0, 0, 0, 0);
}

/*
 * The SHUTDOWN hypercall takes a string to describe what's happening, and
 * an argument which says whether to restart (reboot) the Guest or not.
 *
 * Note that the Host always prefers that the Guest speak in physical addresses
 * rather than virtual addresses, so we use __pa() here.
 */
static void lguest_power_off(void)
{
	hcall(LHCALL_SHUTDOWN, __pa("Power down"),
	      LGUEST_SHUTDOWN_POWEROFF, 0, 0);
}

/*
 * Panicking.
 *
 * Don't. But if you did, this is what happens.
 */
static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
{
	hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0, 0);
	/* The hcall won't return, but to keep gcc happy, we're "done". */
	return NOTIFY_DONE;
}

static struct notifier_block paniced = {
	.notifier_call = lguest_panic
};

/* Setting up memory is fairly easy. */
static __init char *lguest_memory_setup(void)
{
	/*
	 * The Linux bootloader header contains an "e820" memory map: the
	 * Launcher populated the first entry with our memory limit.
	 */
	e820_add_region(boot_params.e820_map[0].addr,
			boot_params.e820_map[0].size,
			boot_params.e820_map[0].type);

	/* This string is for the boot messages. */
	return "LGUEST";
}

/*
 * We will eventually use the virtio console device to produce console output,
 * but before that is set up we use LHCALL_NOTIFY on normal memory to produce
 * console output.
 */
static __init int early_put_chars(u32 vtermno, const char *buf, int count)
{
	char scratch[17];
	unsigned int len = count;

	/* We use a nul-terminated string, so we make a copy. Icky, huh? */
	if (len > sizeof(scratch) - 1)
		len = sizeof(scratch) - 1;
	scratch[len] = '\0';
	memcpy(scratch, buf, len);
	hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0, 0);

	/* This routine returns the number of bytes actually written. */
	return len;
}

/*
 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
 * Launcher to reboot us.
 */
static void lguest_restart(char *reason)
{
	hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
}

/*G:050
 * Patching (Powerfully Placating Performance Pedants)
 *
 * We have already seen that pv_ops structures let us replace simple native
 * instructions with calls to the appropriate back end all throughout the
 * kernel. This allows the same kernel to run as a Guest and as a native
 * kernel, but it's slow because of all the indirect branches.
 *
 * Remember that David Wheeler quote about "Any problem in computer science can
 * be solved with another layer of indirection"? The rest of that quote is
 * "... But that usually will create another problem." This is the first of
 * those problems.
 *
 * Our current solution is to allow the paravirt back end to optionally patch
 * over the indirect calls to replace them with something more efficient. We
 * patch two of the simplest of the most commonly called functions: disable
 * interrupts and save interrupts. We usually have 6 or 10 bytes to patch
 * into: the Guest versions of these operations are small enough that we can
 * fit comfortably.
 *
 * First we need assembly templates of each of the patchable Guest operations,
 * and these are in i386_head.S.
 */

/*G:060 We construct a table from the assembler templates: */
static const struct lguest_insns
{
	const char *start, *end;
} lguest_insns[] = {
	[PARAVIRT_PATCH(pv_irq_ops.irq_disable)] = { lgstart_cli, lgend_cli },
	[PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf },
};
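
/*
 * (An illustrative aside, not part of the original file; see i386_head.S for
 * the real templates.) Each template is just the Guest version of the
 * operation written as a few bytes of straight-line code, something like:
 *
 *	lgstart_cli:	movl $0, lguest_data+<offset of irq_enabled>
 *	lgend_cli:
 *	lgstart_pushf:	movl lguest_data+<offset of irq_enabled>, %eax
 *	lgend_pushf:
 *
 * (The exact offset symbol is left out here.) The point is that the
 * patched-in bytes do exactly what irq_disable() and save_fl() above do,
 * without the indirect call.
 */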

/*
 * Now our patch routine is fairly simple (based on the native one in
 * paravirt.c). If we have a replacement, we copy it in and return how much of
 * the available space we used.
 */
static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf,
			     unsigned long addr, unsigned len)
{
	unsigned int insn_len;

	/* Don't do anything special if we don't have a replacement */
	if (type >= ARRAY_SIZE(lguest_insns) || !lguest_insns[type].start)
		return paravirt_patch_default(type, clobber, ibuf, addr, len);

	insn_len = lguest_insns[type].end - lguest_insns[type].start;

	/* Similarly if it can't fit (doesn't happen, but let's be thorough). */
	if (len < insn_len)
		return paravirt_patch_default(type, clobber, ibuf, addr, len);

	/* Copy in our instructions. */
	memcpy(ibuf, lguest_insns[type].start, insn_len);
	return insn_len;
}

/*G:029
 * Once we get to lguest_init(), we know we're a Guest. The various
 * pv_ops structures in the kernel provide points for (almost) every routine we
 * have to override to avoid privileged instructions.
 */
__init void lguest_init(void)
{
	/* We're under lguest. */
	pv_info.name = "lguest";
	/* Paravirt is enabled. */
	pv_info.paravirt_enabled = 1;
	/* We're running at privilege level 1, not 0 as normal. */
	pv_info.kernel_rpl = 1;
	/* Everyone except Xen runs with this set. */
	pv_info.shared_kernel_pmd = 1;

	/*
	 * We set up all the lguest overrides for sensitive operations. These
	 * are detailed with the operations themselves.
	 */

	/* Interrupt-related operations */
	pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl);
	pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(lg_restore_fl);
	pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable);
	pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(lg_irq_enable);
	pv_irq_ops.safe_halt = lguest_safe_halt;

	/* Setup operations */
	pv_init_ops.patch = lguest_patch;

	/* Intercepts of various CPU instructions */
	pv_cpu_ops.load_gdt = lguest_load_gdt;
	pv_cpu_ops.cpuid = lguest_cpuid;
	pv_cpu_ops.load_idt = lguest_load_idt;
	pv_cpu_ops.iret = lguest_iret;
	pv_cpu_ops.load_sp0 = lguest_load_sp0;
	pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
	pv_cpu_ops.set_ldt = lguest_set_ldt;
	pv_cpu_ops.load_tls = lguest_load_tls;
	pv_cpu_ops.set_debugreg = lguest_set_debugreg;
	pv_cpu_ops.clts = lguest_clts;
	pv_cpu_ops.read_cr0 = lguest_read_cr0;
	pv_cpu_ops.write_cr0 = lguest_write_cr0;
	pv_cpu_ops.read_cr4 = lguest_read_cr4;
	pv_cpu_ops.write_cr4 = lguest_write_cr4;
	pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
	pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
	pv_cpu_ops.wbinvd = lguest_wbinvd;
	pv_cpu_ops.start_context_switch = paravirt_start_context_switch;
	pv_cpu_ops.end_context_switch = lguest_end_context_switch;

	/* Pagetable management */
	pv_mmu_ops.write_cr3 = lguest_write_cr3;
	pv_mmu_ops.flush_tlb_user = lguest_flush_tlb_user;
	pv_mmu_ops.flush_tlb_single = lguest_flush_tlb_single;
	pv_mmu_ops.flush_tlb_kernel = lguest_flush_tlb_kernel;
	pv_mmu_ops.set_pte = lguest_set_pte;
	pv_mmu_ops.set_pte_at = lguest_set_pte_at;
	pv_mmu_ops.set_pmd = lguest_set_pmd;
#ifdef CONFIG_X86_PAE
	pv_mmu_ops.set_pte_atomic = lguest_set_pte_atomic;
	pv_mmu_ops.pte_clear = lguest_pte_clear;
	pv_mmu_ops.pmd_clear = lguest_pmd_clear;
	pv_mmu_ops.set_pud = lguest_set_pud;
#endif
	pv_mmu_ops.read_cr2 = lguest_read_cr2;
	pv_mmu_ops.read_cr3 = lguest_read_cr3;
	pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
	pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
	pv_mmu_ops.pte_update = lguest_pte_update;
	pv_mmu_ops.pte_update_defer = lguest_pte_update;

#ifdef CONFIG_X86_LOCAL_APIC
	/* APIC read/write intercepts */
	set_lguest_basic_apic_ops();
#endif

	x86_init.resources.memory_setup = lguest_memory_setup;
	x86_init.irqs.intr_init = lguest_init_IRQ;
	x86_init.timers.timer_init = lguest_time_init;
	x86_platform.calibrate_tsc = lguest_tsc_khz;
	x86_platform.get_wallclock = lguest_get_wallclock;

	/*
	 * Now is a good time to look at the implementations of these functions
	 * before returning to the rest of lguest_init().
	 */

	/*G:070
	 * Now we've seen all the paravirt_ops, we return to
	 * lguest_init() where the rest of the fairly chaotic boot setup
	 * occurs.
	 */

	/*
	 * The stack protector is a weird thing where gcc places a canary
	 * value on the stack and then checks it on return. This file is
	 * compiled with -fno-stack-protector, so we got this far without
	 * problems. The value of the canary is kept at offset 20 from the
	 * %gs register, so we need to set that up before calling C functions
	 * in other files.
	 */
	setup_stack_canary_segment(0);

	/*
	 * We could just call load_stack_canary_segment(), but we might as well
	 * call switch_to_new_gdt() which loads the whole table and sets up the
	 * per-cpu segment descriptor register %fs as well.
	 */
	switch_to_new_gdt(0);

	/* We actually boot with all memory mapped, but let's say 128MB. */
	max_pfn_mapped = (128*1024*1024) >> PAGE_SHIFT;

	/*
	 * The Host<->Guest Switcher lives at the top of our address space, and
	 * the Host told us how big it is when we made the LGUEST_INIT
	 * hypercall: it put the answer in lguest_data.reserve_mem.
	 */
	reserve_top_address(lguest_data.reserve_mem);

	/*
	 * If we don't initialize the lock dependency checker now, it crashes
	 * atomic_notifier_chain_register, then paravirt_disable_iospace.
	 */
	lockdep_init();

	/* Hook in our special panic hypercall code. */
	atomic_notifier_chain_register(&panic_notifier_list, &paniced);

	/*
	 * The IDE code spends about 3 seconds probing for disks: if we reserve
	 * all the I/O ports up front it can't get them and so doesn't probe.
	 * Other device drivers are similar (but less severe). This cuts the
	 * kernel boot time on my machine from 4.1 seconds to 0.45 seconds.
	 */
	paravirt_disable_iospace();

	/*
	 * This is messy CPU setup stuff which the native boot code does before
	 * start_kernel, so we have to do it, too:
	 */
	cpu_detect(&new_cpu_data);
	/* head.S usually sets up the first capability word, so do it here. */
	new_cpu_data.x86_capability[0] = cpuid_edx(1);

	/* Math is always hard! */
	new_cpu_data.hard_math = 1;

	/* We don't have features. We have puppies! Puppies! */
#ifdef CONFIG_X86_MCE
	mce_disabled = 1;
#endif
#ifdef CONFIG_ACPI
	acpi_disabled = 1;
#endif

	/*
	 * We set the preferred console to "hvc". This is the "hypervisor
	 * virtual console" driver written by the PowerPC people, which we also
	 * adapted for lguest's use.
	 */
	add_preferred_console("hvc", 0, NULL);

	/* Register our very early console. */
	virtio_cons_early_init(early_put_chars);

	/*
	 * Last of all, we set the power management poweroff hook to point to
	 * the Guest routine to power off, and the reboot hook to our restart
	 * routine.
	 */
	pm_power_off = lguest_power_off;
	machine_ops.restart = lguest_restart;

	/*
	 * Now we're set up, call i386_start_kernel() in head32.c and we proceed
	 * to boot as normal. It never returns.
	 */
	i386_start_kernel();
}
/*
 * This marks the end of stage II of our journey, The Guest.
 *
 * It is now time for us to explore the layer of virtual drivers and complete
 * our understanding of the Guest in "make Drivers".
 */