/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
#include "x86_emulate.h"
#include "segment_descriptor.h"

#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>

#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/desc.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static DEFINE_SPINLOCK(kvm_lock);
static LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kvm_arch_ops *kvm_arch_ops;

static __read_mostly struct preempt_ops kvm_preempt_ops;

#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)

static struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	struct dentry *dentry;
} debugfs_entries[] = {
	{ "pf_fixed", STAT_OFFSET(pf_fixed) },
	{ "pf_guest", STAT_OFFSET(pf_guest) },
	{ "tlb_flush", STAT_OFFSET(tlb_flush) },
	{ "invlpg", STAT_OFFSET(invlpg) },
	{ "exits", STAT_OFFSET(exits) },
	{ "io_exits", STAT_OFFSET(io_exits) },
	{ "mmio_exits", STAT_OFFSET(mmio_exits) },
	{ "signal_exits", STAT_OFFSET(signal_exits) },
	{ "irq_window", STAT_OFFSET(irq_window_exits) },
	{ "halt_exits", STAT_OFFSET(halt_exits) },
	{ "request_irq", STAT_OFFSET(request_irq_exits) },
	{ "irq_exits", STAT_OFFSET(irq_exits) },
	{ "light_exits", STAT_OFFSET(light_exits) },
	{ "efer_reload", STAT_OFFSET(efer_reload) },
	{ NULL }
};

static struct dentry *debugfs_dir;

#define MAX_IO_MSRS 256

#define CR0_RESERVED_BITS						\
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
#define EFER_RESERVED_BITS 0xfffffffffffff2fe

#ifdef CONFIG_X86_64
/* LDT or TSS descriptor in the GDT.  16 bytes. */
struct segment_descriptor_64 {
	struct segment_descriptor s;
	u32 base_higher;
	u32 pad_zero;
};

#endif

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct segment_descriptor *d;
	unsigned long table_base;
	typedef unsigned long ul;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm ("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {           /* from ldt */
		u16 ldt_selector;

		asm ("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	d = (struct segment_descriptor *)(table_base + (selector & ~7));
	v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
#ifdef CONFIG_X86_64
	if (d->system == 0
	    && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);

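/*
 * Illustrative sketch, not part of the original file: an x86 segment
 * selector packs a 13-bit table index, a table-indicator bit (bit 2)
 * and a 2-bit RPL.  For a hypothetical selector 0x2b:
 *
 *	selector >> 3	= 5	descriptor index
 *	selector & 4	= 0	0 = GDT, 4 = LDT
 *	selector & ~7	= 0x28	byte offset: index * 8-byte descriptors
 *
 * which is why segment_base() adds (selector & ~7) to table_base and
 * recurses through the LDT selector when bit 2 is set.
 */
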
static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

int kvm_read_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
		   void *dest)
{
	unsigned char *host_buf = dest;
	unsigned long req_size = size;

	while (size) {
		hpa_t paddr;
		unsigned now;
		unsigned offset;
		hva_t guest_buf;

		paddr = gva_to_hpa(vcpu, addr);

		if (is_error_hpa(paddr))
			break;

		guest_buf = (hva_t)kmap_atomic(
			pfn_to_page(paddr >> PAGE_SHIFT),
			KM_USER0);
		offset = addr & ~PAGE_MASK;
		guest_buf |= offset;
		now = min(size, PAGE_SIZE - offset);
		memcpy(host_buf, (void *)guest_buf, now);
		host_buf += now;
		addr += now;
		size -= now;
		kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
	}
	return req_size - size;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_write_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
		    void *data)
{
	unsigned char *host_buf = data;
	unsigned long req_size = size;

	while (size) {
		hpa_t paddr;
		unsigned now;
		unsigned offset;
		hva_t guest_buf;
		gfn_t gfn;

		paddr = gva_to_hpa(vcpu, addr);

		if (is_error_hpa(paddr))
			break;

		gfn = vcpu->mmu.gva_to_gpa(vcpu, addr) >> PAGE_SHIFT;
		mark_page_dirty(vcpu->kvm, gfn);
		guest_buf = (hva_t)kmap_atomic(
			pfn_to_page(paddr >> PAGE_SHIFT), KM_USER0);
		offset = addr & ~PAGE_MASK;
		guest_buf |= offset;
		now = min(size, PAGE_SIZE - offset);
		memcpy((void *)guest_buf, host_buf, now);
		host_buf += now;
		addr += now;
		size -= now;
		kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
	}
	return req_size - size;
}
EXPORT_SYMBOL_GPL(kvm_write_guest);

void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 1;
	fx_save(vcpu->host_fx_image);
	fx_restore(vcpu->guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	fx_save(vcpu->guest_fx_image);
	fx_restore(vcpu->host_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
static void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_ops->vcpu_load(vcpu, cpu);
	put_cpu();
}

static void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_ops->vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

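/*
 * Illustrative usage sketch, not part of the original file: every path
 * that touches per-vcpu state brackets the access with this pair, e.g.
 *
 *	vcpu_load(vcpu);
 *	kvm_arch_ops->cache_regs(vcpu);
 *	...
 *	vcpu_put(vcpu);
 *
 * vcpu_load() holds the vcpu mutex and registers the preempt notifier,
 * so arch state is saved and restored if the task migrates or is
 * preempted while the guest registers are live.
 */
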
static void ack_flush(void *_completed)
{
	atomic_t *completed = _completed;

	atomic_inc(completed);
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu, needed;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;
	atomic_t completed;

	atomic_set(&completed, 0);
	cpus_clear(cpus);
	needed = 0;
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
			if (!cpu_isset(cpu, cpus)) {
				cpu_set(cpu, cpus);
				++needed;
			}
	}

	/*
	 * We really want smp_call_function_mask() here.  But that's not
	 * available, so ipi all cpus in parallel and wait for them
	 * to complete.
	 */
	for (cpu = first_cpu(cpus); cpu != NR_CPUS; cpu = next_cpu(cpu, cpus))
		smp_call_function_single(cpu, ack_flush, &completed, 1, 0);
	while (atomic_read(&completed) != needed) {
		cpu_relax();
		barrier();
	}
}

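/*
 * Illustrative sketch, not part of the original file: the wait loop
 * above is a hand-rolled completion counter.  Every IPI'd cpu runs
 * ack_flush(), i.e. atomic_inc(&completed), so once
 *
 *	atomic_read(&completed) == needed
 *
 * each of the 'needed' remote cpus has entered the handler, and any
 * vcpu running there will observe KVM_TLB_FLUSH in vcpu->requests
 * before re-entering the guest.  A hypothetical
 * smp_call_function_mask(cpus, ack_flush, NULL, 1) would replace the
 * loop pair with one blocking call, as the comment above notes.
 */
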
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->mmu.root_hpa = INVALID_PAGE;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail_free_run;
	}
	vcpu->pio_data = page_address(page);

	vcpu->host_fx_image = (char *)ALIGN((hva_t)vcpu->fx_buf,
					    FX_IMAGE_ALIGN);
	vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	return 0;

fail_free_pio_data:
	free_page((unsigned long)vcpu->pio_data);
fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_mmu_destroy(vcpu);
	free_page((unsigned long)vcpu->pio_data);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	kvm_io_bus_init(&kvm->pio_bus);
	spin_lock_init(&kvm->lock);
	INIT_LIST_HEAD(&kvm->active_mmu_pages);
	kvm_io_bus_init(&kvm->mmio_bus);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
	return kvm;
}

static int kvm_dev_open(struct inode *inode, struct file *filp)
{
	return 0;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->phys_mem != dont->phys_mem)
		if (free->phys_mem) {
			for (i = 0; i < free->npages; ++i)
				if (free->phys_mem[i])
					__free_page(free->phys_mem[i]);
			vfree(free->phys_mem);
		}

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	free->phys_mem = NULL;
	free->npages = 0;
	free->dirty_bitmap = NULL;
}

static void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < 2; ++i)
		if (vcpu->pio.guest_pages[i]) {
			__free_page(vcpu->pio.guest_pages[i]);
			vcpu->pio.guest_pages[i] = NULL;
		}
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	/*
	 * Unpin any mmu pages first.
	 */
	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_ops->vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

static int kvm_dev_release(struct inode *inode, struct file *filp)
{
	return 0;
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	kfree(kvm);
}

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_destroy_vm(kvm);
	return 0;
}

static void inject_gp(struct kvm_vcpu *vcpu)
{
	kvm_arch_ops->inject_gp(vcpu, 0);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	u64 *pdpt;
	int ret;
	struct page *page;
	u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];

	spin_lock(&vcpu->kvm->lock);
	page = gfn_to_page(vcpu->kvm, pdpt_gfn);
	if (!page) {
		ret = 0;
		goto out;
	}

	pdpt = kmap_atomic(page, KM_USER0);
	memcpy(pdpte, pdpt+offset, sizeof(pdpte));
	kunmap_atomic(pdpt, KM_USER0);

	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs));
out:
	spin_unlock(&vcpu->kvm->lock);

	return ret;
}

void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->cr0);
		inject_gp(vcpu);
		return;
	}

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		inject_gp(vcpu);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		inject_gp(vcpu);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				inject_gp(vcpu);
				return;
			}
			kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				inject_gp(vcpu);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	}

	kvm_arch_ops->set_cr0(vcpu, cr0);
	vcpu->cr0 = cr0;

	spin_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	spin_unlock(&vcpu->kvm->lock);
	return;
}
EXPORT_SYMBOL_GPL(set_cr0);

void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(lmsw);

void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		inject_gp(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			inject_gp(vcpu);
			return;
		}
	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
		   && !load_pdptrs(vcpu, vcpu->cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		inject_gp(vcpu);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		inject_gp(vcpu);
		return;
	}
	kvm_arch_ops->set_cr4(vcpu, cr4);
	spin_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	spin_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr4);

void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				inject_gp(vcpu);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				inject_gp(vcpu);
				return;
			}
		} else {
			if (cr3 & CR3_NONPAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				inject_gp(vcpu);
				return;
			}
		}
	}

	vcpu->cr3 = cr3;
	spin_lock(&vcpu->kvm->lock);
	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		inject_gp(vcpu);
	else
		vcpu->mmu.new_cr3(vcpu);
	spin_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr3);

void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		inject_gp(vcpu);
		return;
	}
	vcpu->cr8 = cr8;
}
EXPORT_SYMBOL_GPL(set_cr8);

void fx_init(struct kvm_vcpu *vcpu)
{
	struct __attribute__ ((__packed__)) fx_image_s {
		u16 control;	/* fcw */
		u16 status;	/* fsw */
		u16 tag;	/* ftw */
		u16 opcode;	/* fop */
		u64 ip;		/* fpu ip */
		u64 operand;	/* fpu dp */
		u32 mxcsr;
		u32 mxcsr_mask;

	} *fx_image;

	fx_save(vcpu->host_fx_image);
	fpu_init();
	fx_save(vcpu->guest_fx_image);
	fx_restore(vcpu->host_fx_image);

	fx_image = (struct fx_image_s *)vcpu->guest_fx_image;
	fx_image->mxcsr = 0x1f80;
	memset(vcpu->guest_fx_image + sizeof(struct fx_image_s),
	       0, FX_IMAGE_SIZE - sizeof(struct fx_image_s));
}
EXPORT_SYMBOL_GPL(fx_init);

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 */
static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
					  struct kvm_memory_region *mem)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;
	int memory_config_version;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

raced:
	spin_lock(&kvm->lock);

	memory_config_version = kvm->memory_config_version;
	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_unlock;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_unlock;
	}
	/*
	 * Do memory allocations outside lock.  memory_config_version will
	 * detect any races.
	 */
	spin_unlock(&kvm->lock);

	/* Deallocate if slot is being removed */
	if (!npages)
		new.phys_mem = NULL;

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.phys_mem) {
		new.phys_mem = vmalloc(npages * sizeof(struct page *));

		if (!new.phys_mem)
			goto out_free;

		memset(new.phys_mem, 0, npages * sizeof(struct page *));
		for (i = 0; i < npages; ++i) {
			new.phys_mem[i] = alloc_page(GFP_HIGHUSER
						     | __GFP_ZERO);
			if (!new.phys_mem[i])
				goto out_free;
			set_page_private(new.phys_mem[i], 0);
		}
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	spin_lock(&kvm->lock);

	if (memory_config_version != kvm->memory_config_version) {
		spin_unlock(&kvm->lock);
		kvm_free_physmem_slot(&new, &old);
		goto raced;
	}

	r = -EAGAIN;
	if (kvm->busy)
		goto out_unlock;

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;
	++kvm->memory_config_version;

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->lock);

	kvm_free_physmem_slot(&old, &new);
	return 0;

out_unlock:
	spin_unlock(&kvm->lock);
out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}

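/*
 * Illustrative sketch, not part of the original file: the dirty bitmap
 * sized above holds one bit per page, rounded up to whole longs.  For
 * a hypothetical slot with npages = 100 on a 64-bit host:
 *
 *	dirty_bytes = ALIGN(100, 64) / 8 = 128 / 8 = 16
 *
 * i.e. two longs, covering up to 128 pages' worth of dirty bits.
 */
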
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				      struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	spin_lock(&kvm->lock);

	/*
	 * Prevent changes to guest memory configuration even while the lock
	 * is not taken.
	 */
	++kvm->busy;
	spin_unlock(&kvm->lock);
	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	spin_lock(&kvm->lock);
	kvm_mmu_slot_remove_write_access(kvm, log->slot);
	kvm_flush_remote_tlbs(kvm);
	memset(memslot->dirty_bitmap, 0, n);
	spin_unlock(&kvm->lock);

	r = 0;

out:
	spin_lock(&kvm->lock);
	--kvm->busy;
	spin_unlock(&kvm->lock);
	return r;
}

/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
					 struct kvm_memory_alias *alias)
{
	int r, n;
	struct kvm_mem_alias *p;

	r = -EINVAL;
	/* General sanity checks */
	if (alias->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (alias->slot >= KVM_ALIAS_SLOTS)
		goto out;
	if (alias->guest_phys_addr + alias->memory_size
	    < alias->guest_phys_addr)
		goto out;
	if (alias->target_phys_addr + alias->memory_size
	    < alias->target_phys_addr)
		goto out;

	spin_lock(&kvm->lock);

	p = &kvm->aliases[alias->slot];
	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
	p->npages = alias->memory_size >> PAGE_SHIFT;
	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
		if (kvm->aliases[n - 1].npages)
			break;
	kvm->naliases = n;

	kvm_mmu_zap_all(kvm);

	spin_unlock(&kvm->lock);

	return 0;

out:
	return r;
}

static gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->naliases; ++i) {
		alias = &kvm->aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

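/*
 * Illustrative sketch, not part of the original file: with a
 * hypothetical alias slot { base_gfn = 0xa0, npages = 0x10,
 * target_gfn = 0x80 } - roughly a PC VGA window - a guest access to
 * gfn 0xa5 falls inside [0xa0, 0xb0) and unalias_gfn() rewrites it to
 *
 *	0x80 + (0xa5 - 0xa0) = 0x85
 *
 * before the ordinary memslot lookup runs.
 */
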
static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return __gfn_to_memslot(kvm, gfn);
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = __gfn_to_memslot(kvm, gfn);
	if (!slot)
		return NULL;
	return slot->phys_mem[gfn - slot->base_gfn];
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memory_slot *memslot;
	unsigned long rel_gfn;

	for (i = 0; i < kvm->nmemslots; ++i) {
		memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages) {

			if (!memslot->dirty_bitmap)
				return;

			rel_gfn = gfn - memslot->base_gfn;

			/* avoid RMW */
			if (!test_bit(rel_gfn, memslot->dirty_bitmap))
				set_bit(rel_gfn, memslot->dirty_bitmap);
			return;
		}
	}
}

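/*
 * Illustrative note, not part of the original file: set_bit() is a
 * locked read-modify-write on x86, so mark_page_dirty() guards it with
 * a plain test_bit() and skips the atomic when the bit is already set:
 *
 *	if (!test_bit(rel_gfn, map))	- plain load, no lock prefix
 *		set_bit(rel_gfn, map);	- locked rmw only when needed
 *
 * cheap for the common case of a page dirtied many times between two
 * GET_DIRTY_LOG calls.
 */
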
static int emulator_read_std(unsigned long addr,
			     void *val,
			     unsigned int bytes,
			     struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	void *data = val;

	while (bytes) {
		gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
		unsigned long pfn;
		struct page *page;
		void *page_virt;

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;
		pfn = gpa >> PAGE_SHIFT;
		page = gfn_to_page(vcpu->kvm, pfn);
		if (!page)
			return X86EMUL_UNHANDLEABLE;
		page_virt = kmap_atomic(page, KM_USER0);

		memcpy(data, page_virt + offset, tocopy);

		kunmap_atomic(page_virt, KM_USER0);

		bytes -= tocopy;
		data += tocopy;
		addr += tocopy;
	}

	return X86EMUL_CONTINUE;
}

static int emulator_write_std(unsigned long addr,
			      const void *val,
			      unsigned int bytes,
			      struct x86_emulate_ctxt *ctxt)
{
	printk(KERN_ERR "emulator_write_std: addr %lx n %d\n",
	       addr, bytes);
	return X86EMUL_UNHANDLEABLE;
}

static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
						gpa_t addr)
{
	/*
	 * Note that it's important to have this wrapper function because
	 * in the very near future we will be checking for MMIOs against
	 * the LAPIC as well as the general MMIO bus.
	 */
	return kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
}

static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
					       gpa_t addr)
{
	return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
}

static int emulator_read_emulated(unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	} else if (emulator_read_std(addr, val, bytes, ctxt)
		   == X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;

	gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;

	/*
	 * Is this MMIO handled locally?
	 */
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
		return X86EMUL_CONTINUE;
	}

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 0;

	return X86EMUL_UNHANDLEABLE;
}

static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			       const void *val, int bytes)
{
	struct page *page;
	void *virt;

	if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
		return 0;
	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	if (!page)
		return 0;
	mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
	virt = kmap_atomic(page, KM_USER0);
	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
	memcpy(virt + offset_in_page(gpa), val, bytes);
	kunmap_atomic(virt, KM_USER0);
	return 1;
}

static int emulator_write_emulated_onepage(unsigned long addr,
					   const void *val,
					   unsigned int bytes,
					   struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	struct kvm_io_device *mmio_dev;
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

	if (gpa == UNMAPPED_GVA) {
		kvm_arch_ops->inject_page_fault(vcpu, addr, 2);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

	/*
	 * Is this MMIO handled locally?
	 */
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
		return X86EMUL_CONTINUE;
	}

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 1;
	memcpy(vcpu->mmio_data, val, bytes);

	return X86EMUL_CONTINUE;
}

static int emulator_write_emulated(unsigned long addr,
				   const void *val,
				   unsigned int bytes,
				   struct x86_emulate_ctxt *ctxt)
{
	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		int rc, now;

		now = -addr & ~PAGE_MASK;
		rc = emulator_write_emulated_onepage(addr, val, now, ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
		val += now;
		bytes -= now;
	}
	return emulator_write_emulated_onepage(addr, val, bytes, ctxt);
}

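/*
 * Illustrative sketch, not part of the original file: for a
 * hypothetical 4-byte write at addr = 0x1ffe with 4K pages,
 * ((addr + 3) ^ addr) & PAGE_MASK is non-zero, so the write crosses a
 * page boundary and is split:
 *
 *	now = -0x1ffe & ~PAGE_MASK = 2	bytes to the end of the page
 *
 * two bytes go to the first page; the remaining two are handled by the
 * final emulator_write_emulated_onepage() call on the next page.
 */
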
static int emulator_cmpxchg_emulated(unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct x86_emulate_ctxt *ctxt)
{
	static int reported;

	if (!reported) {
		reported = 1;
		printk(KERN_WARNING "kvm: emulating exchange as write\n");
	}
	return emulator_write_emulated(addr, new, bytes, ctxt);
}

static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_arch_ops->get_segment_base(vcpu, seg);
}

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	return X86EMUL_CONTINUE;
}

int emulate_clts(struct kvm_vcpu *vcpu)
{
	unsigned long cr0;

	cr0 = vcpu->cr0 & ~X86_CR0_TS;
	kvm_arch_ops->set_cr0(vcpu, cr0);
	return X86EMUL_CONTINUE;
}

int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (dr) {
	case 0 ... 3:
		*dest = kvm_arch_ops->get_dr(vcpu, dr);
		return X86EMUL_CONTINUE;
	default:
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __FUNCTION__, dr);
		return X86EMUL_UNHANDLEABLE;
	}
}

int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
	int exception;

	kvm_arch_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
	if (exception) {
		/* FIXME: better handling */
		return X86EMUL_UNHANDLEABLE;
	}
	return X86EMUL_CONTINUE;
}

static void report_emulation_failure(struct x86_emulate_ctxt *ctxt)
{
	static int reported;
	u8 opcodes[4];
	unsigned long rip = ctxt->vcpu->rip;
	unsigned long rip_linear;

	rip_linear = rip + get_segment_base(ctxt->vcpu, VCPU_SREG_CS);

	if (reported)
		return;

	emulator_read_std(rip_linear, (void *)opcodes, 4, ctxt);

	printk(KERN_ERR "emulation failed but !mmio_needed?"
	       " rip %lx %02x %02x %02x %02x\n",
	       rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
	reported = 1;
}

struct x86_emulate_ops emulate_ops = {
	.read_std            = emulator_read_std,
	.write_std           = emulator_write_std,
	.read_emulated       = emulator_read_emulated,
	.write_emulated      = emulator_write_emulated,
	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
};

int emulate_instruction(struct kvm_vcpu *vcpu,
			struct kvm_run *run,
			unsigned long cr2,
			u16 error_code)
{
	struct x86_emulate_ctxt emulate_ctxt;
	int r;
	int cs_db, cs_l;

	vcpu->mmio_fault_cr2 = cr2;
	kvm_arch_ops->cache_regs(vcpu);

	kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

	emulate_ctxt.vcpu = vcpu;
	emulate_ctxt.eflags = kvm_arch_ops->get_rflags(vcpu);
	emulate_ctxt.cr2 = cr2;
	emulate_ctxt.mode = (emulate_ctxt.eflags & X86_EFLAGS_VM)
		? X86EMUL_MODE_REAL : cs_l
		? X86EMUL_MODE_PROT64 : cs_db
		? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

	if (emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
		emulate_ctxt.cs_base = 0;
		emulate_ctxt.ds_base = 0;
		emulate_ctxt.es_base = 0;
		emulate_ctxt.ss_base = 0;
	} else {
		emulate_ctxt.cs_base = get_segment_base(vcpu, VCPU_SREG_CS);
		emulate_ctxt.ds_base = get_segment_base(vcpu, VCPU_SREG_DS);
		emulate_ctxt.es_base = get_segment_base(vcpu, VCPU_SREG_ES);
		emulate_ctxt.ss_base = get_segment_base(vcpu, VCPU_SREG_SS);
	}

	emulate_ctxt.gs_base = get_segment_base(vcpu, VCPU_SREG_GS);
	emulate_ctxt.fs_base = get_segment_base(vcpu, VCPU_SREG_FS);

	vcpu->mmio_is_write = 0;
	r = x86_emulate_memop(&emulate_ctxt, &emulate_ops);

	if ((r || vcpu->mmio_is_write) && run) {
		run->exit_reason = KVM_EXIT_MMIO;
		run->mmio.phys_addr = vcpu->mmio_phys_addr;
		memcpy(run->mmio.data, vcpu->mmio_data, 8);
		run->mmio.len = vcpu->mmio_size;
		run->mmio.is_write = vcpu->mmio_is_write;
	}

	if (r) {
		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
			return EMULATE_DONE;
		if (!vcpu->mmio_needed) {
			report_emulation_failure(&emulate_ctxt);
			return EMULATE_FAIL;
		}
		return EMULATE_DO_MMIO;
	}

	kvm_arch_ops->decache_regs(vcpu);
	kvm_arch_ops->set_rflags(vcpu, emulate_ctxt.eflags);

	if (vcpu->mmio_is_write) {
		vcpu->mmio_needed = 0;
		return EMULATE_DO_MMIO;
	}

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);

int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	if (vcpu->irq_summary)
		return 1;

	vcpu->run->exit_reason = KVM_EXIT_HLT;
	++vcpu->stat.halt_exits;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);

int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long nr, a0, a1, a2, a3, a4, a5, ret;

	kvm_arch_ops->cache_regs(vcpu);
	ret = -KVM_EINVAL;
#ifdef CONFIG_X86_64
	if (is_long_mode(vcpu)) {
		nr = vcpu->regs[VCPU_REGS_RAX];
		a0 = vcpu->regs[VCPU_REGS_RDI];
		a1 = vcpu->regs[VCPU_REGS_RSI];
		a2 = vcpu->regs[VCPU_REGS_RDX];
		a3 = vcpu->regs[VCPU_REGS_RCX];
		a4 = vcpu->regs[VCPU_REGS_R8];
		a5 = vcpu->regs[VCPU_REGS_R9];
	} else
#endif
	{
		nr = vcpu->regs[VCPU_REGS_RBX] & -1u;
		a0 = vcpu->regs[VCPU_REGS_RAX] & -1u;
		a1 = vcpu->regs[VCPU_REGS_RCX] & -1u;
		a2 = vcpu->regs[VCPU_REGS_RDX] & -1u;
		a3 = vcpu->regs[VCPU_REGS_RSI] & -1u;
		a4 = vcpu->regs[VCPU_REGS_RDI] & -1u;
		a5 = vcpu->regs[VCPU_REGS_RBP] & -1u;
	}
	switch (nr) {
	default:
		run->hypercall.nr = nr;
		run->hypercall.args[0] = a0;
		run->hypercall.args[1] = a1;
		run->hypercall.args[2] = a2;
		run->hypercall.args[3] = a3;
		run->hypercall.args[4] = a4;
		run->hypercall.args[5] = a5;
		run->hypercall.ret = ret;
		run->hypercall.longmode = is_long_mode(vcpu);
		kvm_arch_ops->decache_regs(vcpu);
		return 0;
	}
	vcpu->regs[VCPU_REGS_RAX] = ret;
	kvm_arch_ops->decache_regs(vcpu);
	return 1;
}
EXPORT_SYMBOL_GPL(kvm_hypercall);

static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}

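/*
 * Illustrative sketch, not part of the original file: mk_cr_64()
 * replaces only the low 32 bits, so for a hypothetical
 * curr_cr = 0x0000000180000011 and new_val = 0x80050033:
 *
 *	(curr_cr & ~0xffffffffULL) | new_val = 0x0000000180050033
 *
 * the high half of the control register survives a 32-bit mov.
 */
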
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_arch_ops->set_gdt(vcpu, &dt);
}

void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_arch_ops->set_idt(vcpu, &dt);
}

void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags)
{
	lmsw(vcpu, msw);
	*rflags = kvm_arch_ops->get_rflags(vcpu);
}

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
	kvm_arch_ops->decache_cr4_guest_bits(vcpu);
	switch (cr) {
	case 0:
		return vcpu->cr0;
	case 2:
		return vcpu->cr2;
	case 3:
		return vcpu->cr3;
	case 4:
		return vcpu->cr4;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
		return 0;
	}
}

void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
		     unsigned long *rflags)
{
	switch (cr) {
	case 0:
		set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
		*rflags = kvm_arch_ops->get_rflags(vcpu);
		break;
	case 2:
		vcpu->cr2 = val;
		break;
	case 3:
		set_cr3(vcpu, val);
		break;
	case 4:
		set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
	}
}

/*
 * Register the para guest with the host:
 */
static int vcpu_register_para(struct kvm_vcpu *vcpu, gpa_t para_state_gpa)
{
	struct kvm_vcpu_para_state *para_state;
	hpa_t para_state_hpa, hypercall_hpa;
	struct page *para_state_page;
	unsigned char *hypercall;
	gpa_t hypercall_gpa;

	printk(KERN_DEBUG "kvm: guest trying to enter paravirtual mode\n");
	printk(KERN_DEBUG ".... para_state_gpa: %08Lx\n", para_state_gpa);

	/*
	 * Needs to be page aligned:
	 */
	if (para_state_gpa != PAGE_ALIGN(para_state_gpa))
		goto err_gp;

	para_state_hpa = gpa_to_hpa(vcpu, para_state_gpa);
	printk(KERN_DEBUG ".... para_state_hpa: %08Lx\n", para_state_hpa);
	if (is_error_hpa(para_state_hpa))
		goto err_gp;

	mark_page_dirty(vcpu->kvm, para_state_gpa >> PAGE_SHIFT);
	para_state_page = pfn_to_page(para_state_hpa >> PAGE_SHIFT);
	para_state = kmap(para_state_page);

	printk(KERN_DEBUG ".... guest version: %d\n", para_state->guest_version);
	printk(KERN_DEBUG ".... size: %d\n", para_state->size);

	para_state->host_version = KVM_PARA_API_VERSION;
	/*
	 * We cannot support guests that try to register themselves
	 * with a newer API version than the host supports:
	 */
	if (para_state->guest_version > KVM_PARA_API_VERSION) {
		para_state->ret = -KVM_EINVAL;
		goto err_kunmap_skip;
	}

	hypercall_gpa = para_state->hypercall_gpa;
	hypercall_hpa = gpa_to_hpa(vcpu, hypercall_gpa);
	printk(KERN_DEBUG ".... hypercall_hpa: %08Lx\n", hypercall_hpa);
	if (is_error_hpa(hypercall_hpa)) {
		para_state->ret = -KVM_EINVAL;
		goto err_kunmap_skip;
	}

	printk(KERN_DEBUG "kvm: para guest successfully registered.\n");
	vcpu->para_state_page = para_state_page;
	vcpu->para_state_gpa = para_state_gpa;
	vcpu->hypercall_gpa = hypercall_gpa;

	mark_page_dirty(vcpu->kvm, hypercall_gpa >> PAGE_SHIFT);
	hypercall = kmap_atomic(pfn_to_page(hypercall_hpa >> PAGE_SHIFT),
				KM_USER1) + (hypercall_hpa & ~PAGE_MASK);
	kvm_arch_ops->patch_hypercall(vcpu, hypercall);
	kunmap_atomic(hypercall, KM_USER1);

	para_state->ret = 0;
err_kunmap_skip:
	kunmap(para_state_page);
	return 0;
err_gp:
	return 1;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_PERF_STATUS:
	case MSR_IA32_EBL_CR_POWERON:
		/* MTRR registers */
	case 0xfe:
	case 0x200 ... 0x2ff:
		data = 0;
		break;
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = vcpu->apic_base;
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->ia32_misc_enable_msr;
		break;
#ifdef CONFIG_X86_64
	case MSR_EFER:
		data = vcpu->shadow_efer;
		break;
#endif
	default:
		printk(KERN_ERR "kvm: unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_arch_ops->get_msr(vcpu, msr_index, pdata);
}

#ifdef CONFIG_X86_64

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & EFER_RESERVED_BITS) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		inject_gp(vcpu);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		inject_gp(vcpu);
		return;
	}

	kvm_arch_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->shadow_efer & EFER_LMA;

	vcpu->shadow_efer = efer;
}

#endif

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
#ifdef CONFIG_X86_64
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
#endif
	case MSR_IA32_MC0_STATUS:
		printk(KERN_WARNING "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
		       __FUNCTION__, data);
		break;
	case MSR_IA32_MCG_STATUS:
		printk(KERN_WARNING "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
		       __FUNCTION__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case 0x200 ... 0x2ff: /* MTRRs */
		break;
	case MSR_IA32_APICBASE:
		vcpu->apic_base = data;
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->ia32_misc_enable_msr = data;
		break;
	/*
	 * This is the 'probe whether the host is KVM' logic:
	 */
	case MSR_KVM_API_MAGIC:
		return vcpu_register_para(vcpu, data);

	default:
		printk(KERN_ERR "kvm: unhandled wrmsr: 0x%x\n", msr);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_arch_ops->set_msr(vcpu, msr_index, data);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	int i;
	u32 function;
	struct kvm_cpuid_entry *e, *best;

	kvm_arch_ops->cache_regs(vcpu);
	function = vcpu->regs[VCPU_REGS_RAX];
	vcpu->regs[VCPU_REGS_RAX] = 0;
	vcpu->regs[VCPU_REGS_RBX] = 0;
	vcpu->regs[VCPU_REGS_RCX] = 0;
	vcpu->regs[VCPU_REGS_RDX] = 0;
	best = NULL;
	for (i = 0; i < vcpu->cpuid_nent; ++i) {
		e = &vcpu->cpuid_entries[i];
		if (e->function == function) {
			best = e;
			break;
		}
		/*
		 * Both basic or both extended?
		 */
		if (((e->function ^ function) & 0x80000000) == 0)
			if (!best || e->function > best->function)
				best = e;
	}
	if (best) {
		vcpu->regs[VCPU_REGS_RAX] = best->eax;
		vcpu->regs[VCPU_REGS_RBX] = best->ebx;
		vcpu->regs[VCPU_REGS_RCX] = best->ecx;
		vcpu->regs[VCPU_REGS_RDX] = best->edx;
	}
	kvm_arch_ops->decache_regs(vcpu);
	kvm_arch_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);

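/*
 * Illustrative sketch, not part of the original file: cpuid leaves
 * below 0x80000000 are "basic", leaves at or above it are "extended",
 * so ((e->function ^ function) & 0x80000000) == 0 means both sit in
 * the same range.  For a hypothetical lookup of function = 0x80000005
 * with only 0x80000001 and 0x80000008 cached, the loop above settles
 * on 0x80000008 (the highest same-range leaf); a basic leaf such as
 * 0x1 is never considered.
 */
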
static int pio_copy_data(struct kvm_vcpu *vcpu)
{
	void *p = vcpu->pio_data;
	void *q;
	unsigned bytes;
	int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;

	q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
		 PAGE_KERNEL);
	if (!q) {
		free_pio_guest_pages(vcpu);
		return -ENOMEM;
	}
	q += vcpu->pio.guest_page_offset;
	bytes = vcpu->pio.size * vcpu->pio.cur_count;
	if (vcpu->pio.in)
		memcpy(q, p, bytes);
	else
		memcpy(p, q, bytes);
	q -= vcpu->pio.guest_page_offset;
	vunmap(q);
	free_pio_guest_pages(vcpu);
	return 0;
}

static int complete_pio(struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->pio;
	long delta;
	int r;

	kvm_arch_ops->cache_regs(vcpu);

	if (!io->string) {
		if (io->in)
			memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data,
			       io->size);
	} else {
		if (io->in) {
			r = pio_copy_data(vcpu);
			if (r) {
				kvm_arch_ops->cache_regs(vcpu);
				return r;
			}
		}

		delta = 1;
		if (io->rep) {
			delta *= io->cur_count;
			/*
			 * The size of the register should really depend on
			 * current address size.
			 */
			vcpu->regs[VCPU_REGS_RCX] -= delta;
		}
		if (io->down)
			delta = -delta;
		delta *= io->size;
		if (io->in)
			vcpu->regs[VCPU_REGS_RDI] += delta;
		else
			vcpu->regs[VCPU_REGS_RSI] += delta;
	}

	kvm_arch_ops->decache_regs(vcpu);

	io->count -= io->cur_count;
	io->cur_count = 0;

	if (!io->count)
		kvm_arch_ops->skip_emulated_instruction(vcpu);
	return 0;
}

static void kernel_pio(struct kvm_io_device *pio_dev,
		       struct kvm_vcpu *vcpu,
		       void *pd)
{
	/* TODO: String I/O for in kernel device */

	if (vcpu->pio.in)
		kvm_iodevice_read(pio_dev, vcpu->pio.port,
				  vcpu->pio.size,
				  pd);
	else
		kvm_iodevice_write(pio_dev, vcpu->pio.port,
				   vcpu->pio.size,
				   pd);
}

static void pio_string_write(struct kvm_io_device *pio_dev,
			     struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->pio;
	void *pd = vcpu->pio_data;
	int i;

	for (i = 0; i < io->cur_count; i++) {
		kvm_iodevice_write(pio_dev, io->port,
				   io->size,
				   pd);
		pd += io->size;
	}
}

int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		  int size, unsigned long count, int string, int down,
		  gva_t address, int rep, unsigned port)
{
	unsigned now, in_page;
	int i, ret = 0;
	int nr_pages = 1;
	struct page *page;
	struct kvm_io_device *pio_dev;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = count;
	vcpu->run->io.port = port;
	vcpu->pio.count = count;
	vcpu->pio.cur_count = count;
	vcpu->pio.size = size;
	vcpu->pio.in = in;
	vcpu->pio.port = port;
	vcpu->pio.string = string;
	vcpu->pio.down = down;
	vcpu->pio.guest_page_offset = offset_in_page(address);
	vcpu->pio.rep = rep;

	pio_dev = vcpu_find_pio_dev(vcpu, port);
	if (!string) {
		kvm_arch_ops->cache_regs(vcpu);
		memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
		kvm_arch_ops->decache_regs(vcpu);
		if (pio_dev) {
			kernel_pio(pio_dev, vcpu, vcpu->pio_data);
			complete_pio(vcpu);
			return 1;
		}
		return 0;
	}

	if (!count) {
		kvm_arch_ops->skip_emulated_instruction(vcpu);
		return 1;
	}

	now = min(count, PAGE_SIZE / size);

	if (!down)
		in_page = PAGE_SIZE - offset_in_page(address);
	else
		in_page = offset_in_page(address) + size;
	now = min(count, (unsigned long)in_page / size);
	if (!now) {
		/*
		 * String I/O straddles page boundary.  Pin two guest pages
		 * so that we satisfy atomicity constraints.  Do just one
		 * transaction to avoid complexity.
		 */
		nr_pages = 2;
		now = 1;
	}
	if (down) {
		/*
		 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
		 */
		printk(KERN_ERR "kvm: guest string pio down\n");
		inject_gp(vcpu);
		return 1;
	}
	vcpu->run->io.count = now;
	vcpu->pio.cur_count = now;

	for (i = 0; i < nr_pages; ++i) {
		spin_lock(&vcpu->kvm->lock);
		page = gva_to_page(vcpu, address + i * PAGE_SIZE);
		if (page)
			get_page(page);
		vcpu->pio.guest_pages[i] = page;
		spin_unlock(&vcpu->kvm->lock);
		if (!page) {
			inject_gp(vcpu);
			free_pio_guest_pages(vcpu);
			return 1;
		}
	}

	if (!vcpu->pio.in) {
		/* string PIO write */
		ret = pio_copy_data(vcpu);
		if (ret >= 0 && pio_dev) {
			pio_string_write(pio_dev, vcpu);
			complete_pio(vcpu);
			if (vcpu->pio.count == 0)
				ret = 1;
		}
	} else if (pio_dev)
		printk(KERN_ERR "no string pio read support yet, "
		       "port %x size %d count %ld\n",
		       port, size, count);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_setup_pio);

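/*
 * Illustrative sketch, not part of the original file: for a
 * hypothetical "rep outsw" with size = 2, count = 0x30 and
 * address = 0xff8 (4K pages, !down):
 *
 *	in_page = PAGE_SIZE - offset_in_page(0xff8) = 8
 *	now = min(0x30, 8 / 2) = 4	iterations before the page edge
 *
 * Had address been 0xfff, in_page / size would be 0, triggering the
 * two-page pinning path above with a single-iteration transaction.
 */
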
static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	/* re-sync apic's tpr */
	vcpu->cr8 = kvm_run->cr8;

	if (vcpu->pio.cur_count) {
		r = complete_pio(vcpu);
		if (r)
			goto out;
	}

	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;
		r = emulate_instruction(vcpu, kvm_run,
					vcpu->mmio_fault_cr2, 0);
		if (r == EMULATE_DO_MMIO) {
			/*
			 * Read-modify-write.  Back to userspace.
			 */
			r = 0;
			goto out;
		}
	}

	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
		kvm_arch_ops->cache_regs(vcpu);
		vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
		kvm_arch_ops->decache_regs(vcpu);
	}

	r = kvm_arch_ops->run(vcpu, kvm_run);

out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
				   struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	kvm_arch_ops->cache_regs(vcpu);

	regs->rax = vcpu->regs[VCPU_REGS_RAX];
	regs->rbx = vcpu->regs[VCPU_REGS_RBX];
	regs->rcx = vcpu->regs[VCPU_REGS_RCX];
	regs->rdx = vcpu->regs[VCPU_REGS_RDX];
	regs->rsi = vcpu->regs[VCPU_REGS_RSI];
	regs->rdi = vcpu->regs[VCPU_REGS_RDI];
	regs->rsp = vcpu->regs[VCPU_REGS_RSP];
	regs->rbp = vcpu->regs[VCPU_REGS_RBP];
#ifdef CONFIG_X86_64
	regs->r8 = vcpu->regs[VCPU_REGS_R8];
	regs->r9 = vcpu->regs[VCPU_REGS_R9];
	regs->r10 = vcpu->regs[VCPU_REGS_R10];
	regs->r11 = vcpu->regs[VCPU_REGS_R11];
	regs->r12 = vcpu->regs[VCPU_REGS_R12];
	regs->r13 = vcpu->regs[VCPU_REGS_R13];
	regs->r14 = vcpu->regs[VCPU_REGS_R14];
	regs->r15 = vcpu->regs[VCPU_REGS_R15];
#endif

	regs->rip = vcpu->rip;
	regs->rflags = kvm_arch_ops->get_rflags(vcpu);

	/*
	 * Don't leak debug flags in case they were set for guest debugging
	 */
	if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
		regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);

	vcpu_put(vcpu);

	return 0;
}

bccf2150
AK
2024static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
2025 struct kvm_regs *regs)
6aa8b732 2026{
bccf2150 2027 vcpu_load(vcpu);
6aa8b732
AK
2028
2029 vcpu->regs[VCPU_REGS_RAX] = regs->rax;
2030 vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
2031 vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
2032 vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
2033 vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
2034 vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
2035 vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
2036 vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
05b3e0c2 2037#ifdef CONFIG_X86_64
6aa8b732
AK
2038 vcpu->regs[VCPU_REGS_R8] = regs->r8;
2039 vcpu->regs[VCPU_REGS_R9] = regs->r9;
2040 vcpu->regs[VCPU_REGS_R10] = regs->r10;
2041 vcpu->regs[VCPU_REGS_R11] = regs->r11;
2042 vcpu->regs[VCPU_REGS_R12] = regs->r12;
2043 vcpu->regs[VCPU_REGS_R13] = regs->r13;
2044 vcpu->regs[VCPU_REGS_R14] = regs->r14;
2045 vcpu->regs[VCPU_REGS_R15] = regs->r15;
2046#endif
2047
2048 vcpu->rip = regs->rip;
2049 kvm_arch_ops->set_rflags(vcpu, regs->rflags);
2050
2051 kvm_arch_ops->decache_regs(vcpu);
2052
2053 vcpu_put(vcpu);
2054
2055 return 0;
2056}
2057
2058static void get_segment(struct kvm_vcpu *vcpu,
2059 struct kvm_segment *var, int seg)
2060{
2061 return kvm_arch_ops->get_segment(vcpu, var, seg);
2062}
2063
bccf2150
AK
2064static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2065 struct kvm_sregs *sregs)
6aa8b732 2066{
6aa8b732
AK
2067 struct descriptor_table dt;
2068
bccf2150 2069 vcpu_load(vcpu);
6aa8b732
AK
2070
2071 get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
2072 get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
2073 get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
2074 get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
2075 get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
2076 get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
2077
2078 get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
2079 get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
2080
2081 kvm_arch_ops->get_idt(vcpu, &dt);
2082 sregs->idt.limit = dt.limit;
2083 sregs->idt.base = dt.base;
2084 kvm_arch_ops->get_gdt(vcpu, &dt);
2085 sregs->gdt.limit = dt.limit;
2086 sregs->gdt.base = dt.base;
2087
25c4c276 2088 kvm_arch_ops->decache_cr4_guest_bits(vcpu);
6aa8b732
AK
2089 sregs->cr0 = vcpu->cr0;
2090 sregs->cr2 = vcpu->cr2;
2091 sregs->cr3 = vcpu->cr3;
2092 sregs->cr4 = vcpu->cr4;
2093 sregs->cr8 = vcpu->cr8;
2094 sregs->efer = vcpu->shadow_efer;
2095 sregs->apic_base = vcpu->apic_base;
2096
2097 memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
2098 sizeof sregs->interrupt_bitmap);
2099
2100 vcpu_put(vcpu);
2101
2102 return 0;
2103}
2104
2105static void set_segment(struct kvm_vcpu *vcpu,
2106 struct kvm_segment *var, int seg)
2107{
2108 return kvm_arch_ops->set_segment(vcpu, var, seg);
2109}
2110
bccf2150
AK
2111static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2112 struct kvm_sregs *sregs)
6aa8b732 2113{
6aa8b732
AK
2114 int mmu_reset_needed = 0;
2115 int i;
2116 struct descriptor_table dt;
2117
bccf2150 2118 vcpu_load(vcpu);
6aa8b732 2119
6aa8b732
AK
2120 dt.limit = sregs->idt.limit;
2121 dt.base = sregs->idt.base;
2122 kvm_arch_ops->set_idt(vcpu, &dt);
2123 dt.limit = sregs->gdt.limit;
2124 dt.base = sregs->gdt.base;
2125 kvm_arch_ops->set_gdt(vcpu, &dt);
2126
2127 vcpu->cr2 = sregs->cr2;
2128 mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
2129 vcpu->cr3 = sregs->cr3;
2130
2131 vcpu->cr8 = sregs->cr8;
2132
2133 mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
05b3e0c2 2134#ifdef CONFIG_X86_64
6aa8b732
AK
2135 kvm_arch_ops->set_efer(vcpu, sregs->efer);
2136#endif
2137 vcpu->apic_base = sregs->apic_base;
2138
25c4c276 2139 kvm_arch_ops->decache_cr4_guest_bits(vcpu);
399badf3 2140
6aa8b732 2141 mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
f6528b03 2142 kvm_arch_ops->set_cr0(vcpu, sregs->cr0);
6aa8b732
AK
2143
2144 mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
2145 kvm_arch_ops->set_cr4(vcpu, sregs->cr4);
1b0973bd
AK
2146 if (!is_long_mode(vcpu) && is_pae(vcpu))
2147 load_pdptrs(vcpu, vcpu->cr3);
6aa8b732
AK
2148
2149 if (mmu_reset_needed)
2150 kvm_mmu_reset_context(vcpu);
2151
2152 memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
2153 sizeof vcpu->irq_pending);
2154 vcpu->irq_summary = 0;
9eb829ce 2155 for (i = 0; i < ARRAY_SIZE(vcpu->irq_pending); ++i)
6aa8b732
AK
2156 if (vcpu->irq_pending[i])
2157 __set_bit(i, &vcpu->irq_summary);
2158
024aa1c0
AK
2159 set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
2160 set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
2161 set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
2162 set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
2163 set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
2164 set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
2165
2166 set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
2167 set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
2168
6aa8b732
AK
2169 vcpu_put(vcpu);
2170
2171 return 0;
2172}
2173
2174/*
2175 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
2176 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
bf591b24
MR
2177 *
2178 * This list is modified at module load time to reflect the
2179 * capabilities of the host cpu.
6aa8b732
AK
2180 */
2181static u32 msrs_to_save[] = {
2182 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
2183 MSR_K6_STAR,
05b3e0c2 2184#ifdef CONFIG_X86_64
6aa8b732
AK
2185 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
2186#endif
2187 MSR_IA32_TIME_STAMP_COUNTER,
2188};
2189
bf591b24
MR
2190static unsigned num_msrs_to_save;
2191
6f00e68f
AK
2192static u32 emulated_msrs[] = {
2193 MSR_IA32_MISC_ENABLE,
2194};
2195
bf591b24
MR
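/*
 * Probe each candidate MSR with rdmsr_safe() and compact the list down
 * to the ones the host cpu actually implements.
 */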
static __init void kvm_init_msr_list(void)
{
	u32 dummy[2];
	unsigned i, j;

	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;
		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;

	vcpu_put(vcpu);

	return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}

/*
 * Translate a guest virtual address to a guest physical address.
 */
static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;

	vcpu_load(vcpu);
	spin_lock(&vcpu->kvm->lock);
	gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;
	spin_unlock(&vcpu->kvm->lock);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	vcpu_load(vcpu);

	set_bit(irq->irq, vcpu->irq_pending);
	set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);

	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				      struct kvm_debug_guest *dbg)
{
	int r;

	vcpu_load(vcpu);

	r = kvm_arch_ops->set_guest_debug(vcpu, dbg);

	vcpu_put(vcpu);

	return r;
}

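/*
 * mmap handler for the vcpu fd: page 0 is the kvm_run structure shared
 * with userspace, KVM_PIO_PAGE_OFFSET is the pio transfer page.
 */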
static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
				    unsigned long address,
				    int *type)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (pgoff == 0)
		page = virt_to_page(vcpu->run);
	else if (pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->pio_data);
	else
		return NOPAGE_SIGBUS;
	get_page(page);
	if (type != NULL)
		*type = VM_FAULT_MINOR;

	return page;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.nopage = kvm_vcpu_nopage,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	fput(vcpu->kvm->filp);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl = kvm_vcpu_ioctl,
	.mmap = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd, r;
	struct inode *inode;
	struct file *file;

	r = anon_inode_getfd(&fd, &inode, &file,
			     "kvm-vcpu", &kvm_vcpu_fops, vcpu);
	if (r)
		return r;
	atomic_inc(&vcpu->kvm->filp->f_count);
	return fd;
}

/*
 * Creates some virtual cpus. Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(n))
		return -EINVAL;

	vcpu = kvm_arch_ops->vcpu_create(kvm, n);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	vcpu_load(vcpu);
	r = kvm_mmu_setup(vcpu);
	vcpu_put(vcpu);
	if (r < 0)
		goto free_vcpu;

	spin_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
		spin_unlock(&kvm->lock);
		goto mmu_unload;
	}
	kvm->vcpus[n] = vcpu;
	spin_unlock(&kvm->lock);

	/* Now it's all set up, let userspace reach it */
	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto unlink;
	return r;

unlink:
	spin_lock(&kvm->lock);
	kvm->vcpus[n] = NULL;
	spin_unlock(&kvm->lock);

mmu_unload:
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

free_vcpu:
	kvm_arch_ops->vcpu_free(vcpu);
	return r;
}

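/*
 * If the host has NX disabled in EFER, mask the NX bit out of the
 * guest's 0x80000001 cpuid leaf so the guest doesn't try to use it.
 */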
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	u64 efer;
	int i;
	struct kvm_cpuid_entry *e, *entry;

	rdmsrl(MSR_EFER, efer);
	entry = NULL;
	for (i = 0; i < vcpu->cpuid_nent; ++i) {
		e = &vcpu->cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & (1 << 20)) && !(efer & EFER_NX)) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid *cpuid,
				    struct kvm_cpuid_entry __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out;
	vcpu->cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	return 0;

out:
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

/*
 * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
 * we have asm/x86/processor.h
 */
struct fxsave {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	u64	rip;
	u64	rdp;
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
#else
	u32	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};

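/* Copy the guest's fxsave image into the userspace-visible kvm_fpu layout. */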
static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

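/*
 * Dispatcher for the per-vcpu ioctls: copy arguments in from userspace,
 * call the handler, copy results back out.
 */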
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs kvm_regs;

		memset(&kvm_regs, 0, sizeof kvm_regs);
		r = kvm_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
			goto out;
		r = kvm_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

		memset(&kvm_sregs, 0, sizeof kvm_sregs);
		r = kvm_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_vcpu_ioctl_debug_guest(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(vcpu, argp, kvm_get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* pass p, not &sigset: a NULL argp means "clear the mask" */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		struct kvm_fpu fpu;

		memset(&fpu, 0, sizeof fpu);
		r = kvm_vcpu_ioctl_get_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &fpu, sizeof fpu))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		struct kvm_fpu fpu;

		r = -EFAULT;
		if (copy_from_user(&fpu, argp, sizeof fpu))
			goto out;
		r = kvm_vcpu_ioctl_set_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
out:
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_mem);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_MEMORY_ALIAS: {
		struct kvm_memory_alias alias;

		r = -EFAULT;
		if (copy_from_user(&alias, argp, sizeof alias))
			goto out;
		r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
		if (r)
			goto out;
		break;
	}
	default:
		;
	}
out:
	return r;
}

static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address,
				  int *type)
{
	struct kvm *kvm = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	page = gfn_to_page(kvm, pgoff);
	if (!page)
		return NOPAGE_SIGBUS;
	get_page(page);
	if (type != NULL)
		*type = VM_FAULT_MINOR;

	return page;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
	.nopage = kvm_vm_nopage,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl = kvm_vm_ioctl,
	.mmap = kvm_vm_mmap,
};

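/*
 * Create a new VM and expose it to userspace as a file descriptor
 * backed by an anonymous inode.
 */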
static int kvm_dev_ioctl_create_vm(void)
{
	int fd, r;
	struct inode *inode;
	struct file *file;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
	if (r) {
		kvm_destroy_vm(kvm);
		return r;
	}

	kvm->filp = file;

	return fd;
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < num_msrs_to_save)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices
				 + num_msrs_to_save * sizeof(u32),
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_CHECK_EXTENSION:
		/*
		 * No extensions defined at present.
		 */
		r = 0;
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = 2 * PAGE_SIZE;
		break;
	default:
		;
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.open = kvm_dev_open,
	.release = kvm_dev_release,
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

/*
 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
 * cached on it.
 */
static void decache_vcpus_on_cpu(int cpu)
{
	struct kvm *vm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(vm, &vm_list, vm_list) {
		spin_lock(&vm->lock);
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = vm->vcpus[i];
			if (!vcpu)
				continue;
			/*
			 * If the vcpu is locked, then it is running on some
			 * other cpu and therefore it is not cached on the
			 * cpu in question.
			 *
			 * If it's not locked, check the last cpu it executed
			 * on.
			 */
			if (mutex_trylock(&vcpu->mutex)) {
				if (vcpu->cpu == cpu) {
					kvm_arch_ops->vcpu_decache(vcpu);
					vcpu->cpu = -1;
				}
				mutex_unlock(&vcpu->mutex);
			}
		}
		spin_unlock(&vm->lock);
	}
	spin_unlock(&kvm_lock);
}

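/*
 * Enable/disable the virtualization extensions on the current cpu;
 * cpus_hardware_enabled keeps the operations idempotent across the
 * hotplug, suspend and reboot paths.
 */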
static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_set(cpu, cpus_hardware_enabled);
	kvm_arch_ops->hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_clear(cpu, cpus_hardware_enabled);
	decache_vcpus_on_cpu(cpu);
	kvm_arch_ops->hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	switch (val) {
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
		break;
	}
	return NOTIFY_OK;
}

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		on_each_cpu(hardware_disable, NULL, 0, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

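/*
 * A fixed-size bus of in-kernel I/O devices (NR_IOBUS_DEVS slots);
 * lookup is a linear scan over the registered devices.
 */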
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

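/*
 * debugfs accessor: sum a per-vcpu u32 statistic over all vcpus of all
 * VMs; the counter's offset within the vcpu is encoded in the file's
 * private data.  Writes are silently ignored.
 */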
static u64 stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				total += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return total;
}

static void stat_set(void *offset, u64 val)
{
}

DEFINE_SIMPLE_ATTRIBUTE(stat_fops, stat_get, stat_set, "%llu\n");

static __init void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
						(void *)(long)p->offset,
						&stat_fops);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	set_kset_name("kvm"),
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

hpa_t bad_page_address;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

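/*
 * Preemption notifier hooks: save the guest state when a vcpu thread is
 * scheduled out, and reload it on whichever cpu it is scheduled back in on.
 */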
static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_ops->vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_ops->vcpu_put(vcpu);
}

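/*
 * Called by the arch-specific module (the VMX or SVM implementation) at
 * load time: register its kvm_arch_ops, enable the hardware, and hook up
 * the cpu hotplug, reboot, sysdev and misc-device machinery.
 */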
int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
{
	int r;

	if (kvm_arch_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		return -EEXIST;
	}

	if (!ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
		return -EOPNOTSUPP;
	}
	if (ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		return -EOPNOTSUPP;
	}

	kvm_arch_ops = ops;

	r = kvm_arch_ops->hardware_setup();
	if (r < 0)
		goto out;

	on_each_cpu(hardware_enable, NULL, 0, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_1;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_2;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_3;

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	return r;

out_free:
	sysdev_unregister(&kvm_sysdev);
out_free_3:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_2:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_1:
	on_each_cpu(hardware_disable, NULL, 0, 1);
	kvm_arch_ops->hardware_unsetup();
out:
	kvm_arch_ops = NULL;
	return r;
}

void kvm_exit_arch(void)
{
	misc_deregister(&kvm_dev);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 0, 1);
	kvm_arch_ops->hardware_unsetup();
	kvm_arch_ops = NULL;
}

static __init int kvm_init(void)
{
	static struct page *bad_page;
	int r;

	r = kvm_mmu_module_init();
	if (r)
		goto out4;

	kvm_init_debug();

	kvm_init_msr_list();

	if ((bad_page = alloc_page(GFP_KERNEL)) == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
	memset(__va(bad_page_address), 0, PAGE_SIZE);

	return 0;

out:
	kvm_exit_debug();
	kvm_mmu_module_exit();
out4:
	return r;
}

static __exit void kvm_exit(void)
{
	kvm_exit_debug();
	__free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
	kvm_mmu_module_exit();
}

module_init(kvm_init)
module_exit(kvm_exit)

EXPORT_SYMBOL_GPL(kvm_init_arch);
EXPORT_SYMBOL_GPL(kvm_exit_arch);