/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
#include "x86.h"
#include "x86_emulate.h"
#include "irq.h"

#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>

#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/desc.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static DEFINE_SPINLOCK(kvm_lock);
static LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kvm_x86_ops *kvm_x86_ops;
struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)

static struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	struct dentry *dentry;
} debugfs_entries[] = {
	{ "pf_fixed", STAT_OFFSET(pf_fixed) },
	{ "pf_guest", STAT_OFFSET(pf_guest) },
	{ "tlb_flush", STAT_OFFSET(tlb_flush) },
	{ "invlpg", STAT_OFFSET(invlpg) },
	{ "exits", STAT_OFFSET(exits) },
	{ "io_exits", STAT_OFFSET(io_exits) },
	{ "mmio_exits", STAT_OFFSET(mmio_exits) },
	{ "signal_exits", STAT_OFFSET(signal_exits) },
	{ "irq_window", STAT_OFFSET(irq_window_exits) },
	{ "halt_exits", STAT_OFFSET(halt_exits) },
	{ "halt_wakeup", STAT_OFFSET(halt_wakeup) },
	{ "request_irq", STAT_OFFSET(request_irq_exits) },
	{ "irq_exits", STAT_OFFSET(irq_exits) },
	{ "light_exits", STAT_OFFSET(light_exits) },
	{ "efer_reload", STAT_OFFSET(efer_reload) },
	{ NULL }
};

static struct dentry *debugfs_dir;

#define EFER_RESERVED_BITS 0xfffffffffffff2fe

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

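/*
 * Lazy guest FPU handling: kvm_load_guest_fpu() saves the host FPU state and
 * restores the guest's before guest entry, and kvm_put_guest_fpu() does the
 * reverse, but only when the guest has actually used the FPU
 * (fpu_active/guest_fpu_loaded).
 */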
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 1;
	fx_save(&vcpu->host_fx_image);
	fx_restore(&vcpu->guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	fx_save(&vcpu->guest_fx_image);
	fx_restore(&vcpu->host_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

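/*
 * Request a TLB flush on every vcpu: mark KVM_REQ_TLB_FLUSH in each vcpu's
 * request set and IPI the CPUs currently running a vcpu so they leave guest
 * mode and service the request (the IPI handler, ack_flush, does nothing
 * itself).
 */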
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
			cpu_set(cpu, cpus);
	}
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
}

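/*
 * Common vcpu setup: allocate the shared kvm_run page and the pio data page,
 * create the MMU context, and create an in-kernel local APIC when the
 * interrupt chip is emulated in the kernel.
 */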
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->mmu.root_hpa = INVALID_PAGE;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	if (!irqchip_in_kernel(kvm) || id == 0)
		vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
	else
		vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail_free_run;
	}
	vcpu->pio_data = page_address(page);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	if (irqchip_in_kernel(kvm)) {
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
	}

	return 0;

fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
	free_page((unsigned long)vcpu->pio_data);
fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_free_lapic(vcpu);
	kvm_mmu_destroy(vcpu);
	free_page((unsigned long)vcpu->pio_data);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	kvm_io_bus_init(&kvm->pio_bus);
	mutex_init(&kvm->lock);
	INIT_LIST_HEAD(&kvm->active_mmu_pages);
	kvm_io_bus_init(&kvm->mmio_bus);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
	return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

static void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
		if (vcpu->pio.guest_pages[i]) {
			kvm_release_page(vcpu->pio.guest_pages[i]);
			vcpu->pio.guest_pages[i] = NULL;
		}
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	/*
	 * Unpin any mmu pages first.
	 */
	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_x86_ops->vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}

}

static void kvm_destroy_vm(struct kvm *kvm)
{
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
	kfree(kvm->vpic);
	kfree(kvm->vioapic);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	kfree(kvm);
}

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_destroy_vm(kvm);
	return 0;
}

static void inject_gp(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->inject_gp(vcpu, 0);
}

void fx_init(struct kvm_vcpu *vcpu)
{
	unsigned after_mxcsr_mask;

	/* Initialize guest FPU by resetting ours and saving into guest's */
	preempt_disable();
	fx_save(&vcpu->host_fx_image);
	fpu_init();
	fx_save(&vcpu->guest_fx_image);
	fx_restore(&vcpu->host_fx_image);
	preempt_enable();

	vcpu->cr0 |= X86_CR0_ET;
	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
	vcpu->guest_fx_image.mxcsr = 0x1f80;
	memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 */
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	mutex_lock(&kvm->lock);

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_unlock;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_unlock;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_unlock;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		if (user_alloc)
			new.userspace_addr = mem->userspace_addr;
		else {
			down_write(&current->mm->mmap_sem);
			new.userspace_addr = do_mmap(NULL, 0,
						     npages * PAGE_SIZE,
						     PROT_READ | PROT_WRITE,
						     MAP_SHARED | MAP_ANONYMOUS,
						     0);
			up_write(&current->mm->mmap_sem);

			if (IS_ERR((void *)new.userspace_addr))
				goto out_unlock;
		}
	} else {
		if (!old.user_alloc && old.rmap) {
			int ret;

			down_write(&current->mm->mmap_sem);
			ret = do_munmap(current->mm, old.userspace_addr,
					old.npages * PAGE_SIZE);
			up_write(&current->mm->mmap_sem);
			if (ret < 0)
				printk(KERN_WARNING
				       "kvm_vm_ioctl_set_memory_region: "
				       "failed to munmap memory\n");
		}
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_unlock;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	if (!kvm->n_requested_mmu_pages) {
		unsigned int n_pages;

		if (npages) {
			n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
			kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
						 n_pages);
		} else {
			unsigned int nr_mmu_pages;

			n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
			nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
			nr_mmu_pages = max(nr_mmu_pages,
					   (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
			kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
		}
	}

	*memslot = new;

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	kvm_flush_remote_tlbs(kvm);

	mutex_unlock(&kvm->lock);

	kvm_free_physmem_slot(&old, &new);
	return 0;

out_unlock:
	mutex_unlock(&kvm->lock);
	kvm_free_physmem_slot(&new, &old);
out:
	return r;

}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

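/*
 * Illustrative only (not part of this file): userspace typically reaches
 * kvm_vm_ioctl_set_memory_region() through the KVM_SET_USER_MEMORY_REGION
 * ioctl, passing a struct kvm_userspace_memory_region whose slot,
 * guest_phys_addr, memory_size and (for user_alloc) userspace_addr fields
 * must satisfy the page-alignment and overlap checks made above.
 */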
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct
				   kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				      struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	mutex_lock(&kvm->lock);

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (any) {
		kvm_mmu_slot_remove_write_access(kvm, log->slot);
		kvm_flush_remote_tlbs(kvm);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;

out:
	mutex_unlock(&kvm->lock);
	return r;
}

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->naliases; ++i) {
		alias = &kvm->aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return __gfn_to_memslot(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

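/*
 * Translate a guest frame number to a host page by pinning the matching page
 * of the slot's userspace mapping with get_user_pages().  On failure a
 * reference to bad_page is returned instead, so callers can unconditionally
 * release the result with kvm_release_page().
 */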
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;
	struct page *page[1];
	int npages;

	might_sleep();

	gfn = unalias_gfn(kvm, gfn);
	slot = __gfn_to_memslot(kvm, gfn);
	if (!slot) {
		get_page(bad_page);
		return bad_page;
	}

	down_read(&current->mm->mmap_sem);
	npages = get_user_pages(current, current->mm,
				slot->userspace_addr
				+ (gfn - slot->base_gfn) * PAGE_SIZE, 1,
				1, 1, page, NULL);
	up_read(&current->mm->mmap_sem);
	if (npages != 1) {
		get_page(bad_page);
		return bad_page;
	}

	return page[0];
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page(struct page *page)
{
	if (!PageReserved(page))
		SetPageDirty(page);
	put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page);

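/*
 * Guest memory accessors: next_segment() limits each copy to the current
 * page, and kvm_read_guest()/kvm_write_guest()/kvm_clear_guest() loop over
 * the per-page helpers until the requested range has been covered.
 */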
static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	void *page_virt;
	struct page *page;

	page = gfn_to_page(kvm, gfn);
	if (is_error_page(page)) {
		kvm_release_page(page);
		return -EFAULT;
	}
	page_virt = kmap_atomic(page, KM_USER0);

	memcpy(data, page_virt + offset, len);

	kunmap_atomic(page_virt, KM_USER0);
	kvm_release_page(page);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	void *page_virt;
	struct page *page;

	page = gfn_to_page(kvm, gfn);
	if (is_error_page(page)) {
		kvm_release_page(page);
		return -EFAULT;
	}
	page_virt = kmap_atomic(page, KM_USER0);

	memcpy(page_virt + offset, data, len);

	kunmap_atomic(page_virt, KM_USER0);
	mark_page_dirty(kvm, gfn);
	kvm_release_page(page);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	void *page_virt;
	struct page *page;

	page = gfn_to_page(kvm, gfn);
	if (is_error_page(page)) {
		kvm_release_page(page);
		return -EFAULT;
	}
	page_virt = kmap_atomic(page, KM_USER0);

	memset(page_virt + offset, 0, len);

	kunmap_atomic(page_virt, KM_USER0);
	kvm_release_page(page);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

/* WARNING: Does not work on aliased pages. */
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = __gfn_to_memslot(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu)
{
	void *data = val;

	while (bytes) {
		gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;
		ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
		if (ret < 0)
			return X86EMUL_UNHANDLEABLE;

		bytes -= tocopy;
		data += tocopy;
		addr += tocopy;
	}

	return X86EMUL_CONTINUE;
}
EXPORT_SYMBOL_GPL(emulator_read_std);

static int emulator_write_std(unsigned long addr,
			      const void *val,
			      unsigned int bytes,
			      struct kvm_vcpu *vcpu)
{
	pr_unimpl(vcpu, "emulator_write_std: addr %lx n %d\n", addr, bytes);
	return X86EMUL_UNHANDLEABLE;
}

/*
 * Only the apic needs an MMIO device hook, so shortcut now.
 */
static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
						   gpa_t addr)
{
	struct kvm_io_device *dev;

	if (vcpu->apic) {
		dev = &vcpu->apic->dev;
		if (dev->in_range(dev, addr))
			return dev;
	}
	return NULL;
}

static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
						gpa_t addr)
{
	struct kvm_io_device *dev;

	dev = vcpu_find_pervcpu_dev(vcpu, addr);
	if (dev == NULL)
		dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
	return dev;
}

static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
					       gpa_t addr)
{
	return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
}

static int emulator_read_emulated(unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	} else if (emulator_read_std(addr, val, bytes, vcpu)
		   == X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;

	gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;

	/*
	 * Is this MMIO handled locally?
	 */
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
		return X86EMUL_CONTINUE;
	}

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 0;

	return X86EMUL_UNHANDLEABLE;
}

static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			       const void *val, int bytes)
{
	int ret;

	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
	if (ret < 0)
		return 0;
	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
	return 1;
}

static int emulator_write_emulated_onepage(unsigned long addr,
					   const void *val,
					   unsigned int bytes,
					   struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

	if (gpa == UNMAPPED_GVA) {
		kvm_x86_ops->inject_page_fault(vcpu, addr, 2);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

	/*
	 * Is this MMIO handled locally?
	 */
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
		return X86EMUL_CONTINUE;
	}

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 1;
	memcpy(vcpu->mmio_data, val, bytes);

	return X86EMUL_CONTINUE;
}

int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu)
{
	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		int rc, now;

		now = -addr & ~PAGE_MASK;
		rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
		val += now;
		bytes -= now;
	}
	return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
}
EXPORT_SYMBOL_GPL(emulator_write_emulated);

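/*
 * Worked example of the split above (4K pages): a 4-byte write at 0x1ffe has
 * ((0x1ffe + 3) ^ 0x1ffe) & PAGE_MASK != 0, so it straddles a page boundary;
 * now = -0x1ffe & ~PAGE_MASK = 2, giving one 2-byte write to the first page
 * followed by a second 2-byte write starting at 0x2000.
 */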
static int emulator_cmpxchg_emulated(unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct kvm_vcpu *vcpu)
{
	static int reported;

	if (!reported) {
		reported = 1;
		printk(KERN_WARNING "kvm: emulating exchange as write\n");
	}
	return emulator_write_emulated(addr, new, bytes, vcpu);
}

static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_x86_ops->get_segment_base(vcpu, seg);
}

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	return X86EMUL_CONTINUE;
}

int emulate_clts(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->set_cr0(vcpu, vcpu->cr0 & ~X86_CR0_TS);
	return X86EMUL_CONTINUE;
}

int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (dr) {
	case 0 ... 3:
		*dest = kvm_x86_ops->get_dr(vcpu, dr);
		return X86EMUL_CONTINUE;
	default:
		pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
		return X86EMUL_UNHANDLEABLE;
	}
}

int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
	int exception;

	kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
	if (exception) {
		/* FIXME: better handling */
		return X86EMUL_UNHANDLEABLE;
	}
	return X86EMUL_CONTINUE;
}

void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
{
	static int reported;
	u8 opcodes[4];
	unsigned long rip = vcpu->rip;
	unsigned long rip_linear;

	rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);

	if (reported)
		return;

	emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);

	printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
	       context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
	reported = 1;
}
EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);

struct x86_emulate_ops emulate_ops = {
	.read_std = emulator_read_std,
	.write_std = emulator_write_std,
	.read_emulated = emulator_read_emulated,
	.write_emulated = emulator_write_emulated,
	.cmpxchg_emulated = emulator_cmpxchg_emulated,
};

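/*
 * Top-level emulation entry point: unless the caller already decoded the
 * instruction (no_decode), build the emulation context from the current
 * segment state, decode at the faulting rip and run x86_emulate_insn().
 * The result is either EMULATE_DONE, EMULATE_DO_MMIO (exit to userspace with
 * an MMIO/PIO request filled into the kvm_run area) or EMULATE_FAIL.
 */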
int emulate_instruction(struct kvm_vcpu *vcpu,
			struct kvm_run *run,
			unsigned long cr2,
			u16 error_code,
			int no_decode)
{
	int r;

	vcpu->mmio_fault_cr2 = cr2;
	kvm_x86_ops->cache_regs(vcpu);

	vcpu->mmio_is_write = 0;
	vcpu->pio.string = 0;

	if (!no_decode) {
		int cs_db, cs_l;
		kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

		vcpu->emulate_ctxt.vcpu = vcpu;
		vcpu->emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
		vcpu->emulate_ctxt.cr2 = cr2;
		vcpu->emulate_ctxt.mode =
			(vcpu->emulate_ctxt.eflags & X86_EFLAGS_VM)
			? X86EMUL_MODE_REAL : cs_l
			? X86EMUL_MODE_PROT64 : cs_db
			? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

		if (vcpu->emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
			vcpu->emulate_ctxt.cs_base = 0;
			vcpu->emulate_ctxt.ds_base = 0;
			vcpu->emulate_ctxt.es_base = 0;
			vcpu->emulate_ctxt.ss_base = 0;
		} else {
			vcpu->emulate_ctxt.cs_base =
					get_segment_base(vcpu, VCPU_SREG_CS);
			vcpu->emulate_ctxt.ds_base =
					get_segment_base(vcpu, VCPU_SREG_DS);
			vcpu->emulate_ctxt.es_base =
					get_segment_base(vcpu, VCPU_SREG_ES);
			vcpu->emulate_ctxt.ss_base =
					get_segment_base(vcpu, VCPU_SREG_SS);
		}

		vcpu->emulate_ctxt.gs_base =
				get_segment_base(vcpu, VCPU_SREG_GS);
		vcpu->emulate_ctxt.fs_base =
				get_segment_base(vcpu, VCPU_SREG_FS);

		r = x86_decode_insn(&vcpu->emulate_ctxt, &emulate_ops);
		if (r) {
			if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
				return EMULATE_DONE;
			return EMULATE_FAIL;
		}
	}

	r = x86_emulate_insn(&vcpu->emulate_ctxt, &emulate_ops);

	if (vcpu->pio.string)
		return EMULATE_DO_MMIO;

	if ((r || vcpu->mmio_is_write) && run) {
		run->exit_reason = KVM_EXIT_MMIO;
		run->mmio.phys_addr = vcpu->mmio_phys_addr;
		memcpy(run->mmio.data, vcpu->mmio_data, 8);
		run->mmio.len = vcpu->mmio_size;
		run->mmio.is_write = vcpu->mmio_is_write;
	}

	if (r) {
		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
			return EMULATE_DONE;
		if (!vcpu->mmio_needed) {
			kvm_report_emulation_failure(vcpu, "mmio");
			return EMULATE_FAIL;
		}
		return EMULATE_DO_MMIO;
	}

	kvm_x86_ops->decache_regs(vcpu);
	kvm_x86_ops->set_rflags(vcpu, vcpu->emulate_ctxt.eflags);

	if (vcpu->mmio_is_write) {
		vcpu->mmio_needed = 0;
		return EMULATE_DO_MMIO;
	}

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
static void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&vcpu->wq, &wait);

	/*
	 * We will block until either an interrupt or a signal wakes us up
	 */
	while (!kvm_cpu_has_interrupt(vcpu)
	       && !signal_pending(current)
	       && vcpu->mp_state != VCPU_MP_STATE_RUNNABLE
	       && vcpu->mp_state != VCPU_MP_STATE_SIPI_RECEIVED) {
		set_current_state(TASK_INTERRUPTIBLE);
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->wq, &wait);
}

int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.halt_exits;
	if (irqchip_in_kernel(vcpu->kvm)) {
		vcpu->mp_state = VCPU_MP_STATE_HALTED;
		kvm_vcpu_block(vcpu);
		if (vcpu->mp_state != VCPU_MP_STATE_RUNNABLE)
			return -EINTR;
		return 1;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_HLT;
		return 0;
	}
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);

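/*
 * Hypercall ABI as handled here: the hypercall number is taken from RAX and
 * up to four arguments from RBX, RCX, RDX and RSI (truncated to 32 bits
 * outside long mode); the result is returned in RAX.  No hypercall numbers
 * are implemented yet, so everything currently returns -KVM_ENOSYS.
 */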
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
	unsigned long nr, a0, a1, a2, a3, ret;

	kvm_x86_ops->cache_regs(vcpu);

	nr = vcpu->regs[VCPU_REGS_RAX];
	a0 = vcpu->regs[VCPU_REGS_RBX];
	a1 = vcpu->regs[VCPU_REGS_RCX];
	a2 = vcpu->regs[VCPU_REGS_RDX];
	a3 = vcpu->regs[VCPU_REGS_RSI];

	if (!is_long_mode(vcpu)) {
		nr &= 0xFFFFFFFF;
		a0 &= 0xFFFFFFFF;
		a1 &= 0xFFFFFFFF;
		a2 &= 0xFFFFFFFF;
		a3 &= 0xFFFFFFFF;
	}

	switch (nr) {
	default:
		ret = -KVM_ENOSYS;
		break;
	}
	vcpu->regs[VCPU_REGS_RAX] = ret;
	kvm_x86_ops->decache_regs(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);

int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
{
	char instruction[3];
	int ret = 0;

	mutex_lock(&vcpu->kvm->lock);

	/*
	 * Blow out the MMU so that no other VCPU keeps an active mapping,
	 * ensuring that the updated hypercall appears atomically across all
	 * VCPUs.
	 */
	kvm_mmu_zap_all(vcpu->kvm);

	kvm_x86_ops->cache_regs(vcpu);
	kvm_x86_ops->patch_hypercall(vcpu, instruction);
	if (emulator_write_emulated(vcpu->rip, instruction, 3, vcpu)
	    != X86EMUL_CONTINUE)
		ret = -EFAULT;

	mutex_unlock(&vcpu->kvm->lock);

	return ret;
}

static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}

void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_x86_ops->set_gdt(vcpu, &dt);
}

void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_x86_ops->set_idt(vcpu, &dt);
}

void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags)
{
	lmsw(vcpu, msw);
	*rflags = kvm_x86_ops->get_rflags(vcpu);
}

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	switch (cr) {
	case 0:
		return vcpu->cr0;
	case 2:
		return vcpu->cr2;
	case 3:
		return vcpu->cr3;
	case 4:
		return vcpu->cr4;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
		return 0;
	}
}

void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
		     unsigned long *rflags)
{
	switch (cr) {
	case 0:
		set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
		*rflags = kvm_x86_ops->get_rflags(vcpu);
		break;
	case 2:
		vcpu->cr2 = val;
		break;
	case 3:
		set_cr3(vcpu, val);
		break;
	case 4:
		set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
	}
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_PERF_STATUS:
	case MSR_IA32_EBL_CR_POWERON:
		/* MTRR registers */
	case 0xfe:
	case 0x200 ... 0x2ff:
		data = 0;
		break;
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->ia32_misc_enable_msr;
		break;
#ifdef CONFIG_X86_64
	case MSR_EFER:
		data = vcpu->shadow_efer;
		break;
#endif
	default:
		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

#ifdef CONFIG_X86_64

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & EFER_RESERVED_BITS) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		inject_gp(vcpu);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		inject_gp(vcpu);
		return;
	}

	kvm_x86_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->shadow_efer & EFER_LMA;

	vcpu->shadow_efer = efer;
}

#endif

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
#ifdef CONFIG_X86_64
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
#endif
	case MSR_IA32_MC0_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
			  __FUNCTION__, data);
		break;
	case MSR_IA32_MCG_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
			  __FUNCTION__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case 0x200 ... 0x2ff: /* MTRRs */
		break;
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->ia32_misc_enable_msr = data;
		break;
	default:
		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	int i;
	u32 function;
	struct kvm_cpuid_entry *e, *best;

	kvm_x86_ops->cache_regs(vcpu);
	function = vcpu->regs[VCPU_REGS_RAX];
	vcpu->regs[VCPU_REGS_RAX] = 0;
	vcpu->regs[VCPU_REGS_RBX] = 0;
	vcpu->regs[VCPU_REGS_RCX] = 0;
	vcpu->regs[VCPU_REGS_RDX] = 0;
	best = NULL;
	for (i = 0; i < vcpu->cpuid_nent; ++i) {
		e = &vcpu->cpuid_entries[i];
		if (e->function == function) {
			best = e;
			break;
		}
		/*
		 * Both basic or both extended?
		 */
		if (((e->function ^ function) & 0x80000000) == 0)
			if (!best || e->function > best->function)
				best = e;
	}
	if (best) {
		vcpu->regs[VCPU_REGS_RAX] = best->eax;
		vcpu->regs[VCPU_REGS_RBX] = best->ebx;
		vcpu->regs[VCPU_REGS_RCX] = best->ecx;
		vcpu->regs[VCPU_REGS_RDX] = best->edx;
	}
	kvm_x86_ops->decache_regs(vcpu);
	kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);

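/*
 * For string PIO the guest buffer pages are pinned in vcpu->pio.guest_pages;
 * pio_copy_data() vmap()s them and copies between the guest buffer and the
 * per-vcpu pio_data page, and complete_pio() then adjusts RCX/RSI/RDI by the
 * amount transferred.
 */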
static int pio_copy_data(struct kvm_vcpu *vcpu)
{
	void *p = vcpu->pio_data;
	void *q;
	unsigned bytes;
	int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;

	q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
		 PAGE_KERNEL);
	if (!q) {
		free_pio_guest_pages(vcpu);
		return -ENOMEM;
	}
	q += vcpu->pio.guest_page_offset;
	bytes = vcpu->pio.size * vcpu->pio.cur_count;
	if (vcpu->pio.in)
		memcpy(q, p, bytes);
	else
		memcpy(p, q, bytes);
	q -= vcpu->pio.guest_page_offset;
	vunmap(q);
	free_pio_guest_pages(vcpu);
	return 0;
}

static int complete_pio(struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->pio;
	long delta;
	int r;

	kvm_x86_ops->cache_regs(vcpu);

	if (!io->string) {
		if (io->in)
			memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data,
			       io->size);
	} else {
		if (io->in) {
			r = pio_copy_data(vcpu);
			if (r) {
				kvm_x86_ops->cache_regs(vcpu);
				return r;
			}
		}

		delta = 1;
		if (io->rep) {
			delta *= io->cur_count;
			/*
			 * The size of the register should really depend on
			 * current address size.
			 */
			vcpu->regs[VCPU_REGS_RCX] -= delta;
		}
		if (io->down)
			delta = -delta;
		delta *= io->size;
		if (io->in)
			vcpu->regs[VCPU_REGS_RDI] += delta;
		else
			vcpu->regs[VCPU_REGS_RSI] += delta;
	}

	kvm_x86_ops->decache_regs(vcpu);

	io->count -= io->cur_count;
	io->cur_count = 0;

	return 0;
}

static void kernel_pio(struct kvm_io_device *pio_dev,
		       struct kvm_vcpu *vcpu,
		       void *pd)
{
	/* TODO: String I/O for in kernel device */

	mutex_lock(&vcpu->kvm->lock);
	if (vcpu->pio.in)
		kvm_iodevice_read(pio_dev, vcpu->pio.port,
				  vcpu->pio.size,
				  pd);
	else
		kvm_iodevice_write(pio_dev, vcpu->pio.port,
				   vcpu->pio.size,
				   pd);
	mutex_unlock(&vcpu->kvm->lock);
}

static void pio_string_write(struct kvm_io_device *pio_dev,
			     struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->pio;
	void *pd = vcpu->pio_data;
	int i;

	mutex_lock(&vcpu->kvm->lock);
	for (i = 0; i < io->cur_count; i++) {
		kvm_iodevice_write(pio_dev, io->port,
				   io->size,
				   pd);
		pd += io->size;
	}
	mutex_unlock(&vcpu->kvm->lock);
}

int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		    int size, unsigned port)
{
	struct kvm_io_device *pio_dev;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = vcpu->pio.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = 1;
	vcpu->run->io.port = vcpu->pio.port = port;
	vcpu->pio.in = in;
	vcpu->pio.string = 0;
	vcpu->pio.down = 0;
	vcpu->pio.guest_page_offset = 0;
	vcpu->pio.rep = 0;

	kvm_x86_ops->cache_regs(vcpu);
	memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
	kvm_x86_ops->decache_regs(vcpu);

	kvm_x86_ops->skip_emulated_instruction(vcpu);

	pio_dev = vcpu_find_pio_dev(vcpu, port);
	if (pio_dev) {
		kernel_pio(pio_dev, vcpu, vcpu->pio_data);
		complete_pio(vcpu);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio);

int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
			   int size, unsigned long count, int down,
			   gva_t address, int rep, unsigned port)
{
	unsigned now, in_page;
	int i, ret = 0;
	int nr_pages = 1;
	struct page *page;
	struct kvm_io_device *pio_dev;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = vcpu->pio.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = count;
	vcpu->run->io.port = vcpu->pio.port = port;
	vcpu->pio.in = in;
	vcpu->pio.string = 1;
	vcpu->pio.down = down;
	vcpu->pio.guest_page_offset = offset_in_page(address);
	vcpu->pio.rep = rep;

	if (!count) {
		kvm_x86_ops->skip_emulated_instruction(vcpu);
		return 1;
	}

	if (!down)
		in_page = PAGE_SIZE - offset_in_page(address);
	else
		in_page = offset_in_page(address) + size;
	now = min(count, (unsigned long)in_page / size);
	if (!now) {
		/*
		 * String I/O straddles page boundary.  Pin two guest pages
		 * so that we satisfy atomicity constraints.  Do just one
		 * transaction to avoid complexity.
		 */
		nr_pages = 2;
		now = 1;
	}
	if (down) {
		/*
		 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
		 */
		pr_unimpl(vcpu, "guest string pio down\n");
		inject_gp(vcpu);
		return 1;
	}
	vcpu->run->io.count = now;
	vcpu->pio.cur_count = now;

	if (vcpu->pio.cur_count == vcpu->pio.count)
		kvm_x86_ops->skip_emulated_instruction(vcpu);

	for (i = 0; i < nr_pages; ++i) {
		mutex_lock(&vcpu->kvm->lock);
		page = gva_to_page(vcpu, address + i * PAGE_SIZE);
		vcpu->pio.guest_pages[i] = page;
		mutex_unlock(&vcpu->kvm->lock);
		if (!page) {
			inject_gp(vcpu);
			free_pio_guest_pages(vcpu);
			return 1;
		}
	}

	pio_dev = vcpu_find_pio_dev(vcpu, port);
	if (!vcpu->pio.in) {
		/* string PIO write */
		ret = pio_copy_data(vcpu);
		if (ret >= 0 && pio_dev) {
			pio_string_write(pio_dev, vcpu);
			complete_pio(vcpu);
			if (vcpu->pio.count == 0)
				ret = 1;
		}
	} else if (pio_dev)
		pr_unimpl(vcpu, "no string pio read support yet, "
			  "port %x size %d count %ld\n",
			  port, size, count);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);

/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
					struct kvm_run *kvm_run)
{
	return (!vcpu->irq_summary &&
		kvm_run->request_interrupt_window &&
		vcpu->interrupt_window_open &&
		(kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
}

static void post_kvm_run_save(struct kvm_vcpu *vcpu,
			      struct kvm_run *kvm_run)
{
	kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
	kvm_run->cr8 = get_cr8(vcpu);
	kvm_run->apic_base = kvm_get_apic_base(vcpu);
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_run->ready_for_interrupt_injection = 1;
	else
		kvm_run->ready_for_interrupt_injection =
				(vcpu->interrupt_window_open &&
				 vcpu->irq_summary == 0);
}

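/*
 * Main guest entry loop: reload the MMU, inject any pending interrupts or
 * timer irqs, enter the guest with kvm_x86_ops->run(), then let
 * kvm_x86_ops->handle_exit() decide whether to loop straight back in
 * ("light exit") or return to userspace.
 */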
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;

	if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
		pr_debug("vcpu %d received sipi with vector # %x\n",
			 vcpu->vcpu_id, vcpu->sipi_vector);
		kvm_lapic_reset(vcpu);
		r = kvm_x86_ops->vcpu_reset(vcpu);
		if (r)
			return r;
		vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
	}

preempted:
	if (vcpu->guest_debug.enabled)
		kvm_x86_ops->guest_debug_pre(vcpu);

again:
	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		goto out;

	kvm_inject_pending_timer_irqs(vcpu);

	preempt_disable();

	kvm_x86_ops->prepare_guest_switch(vcpu);
	kvm_load_guest_fpu(vcpu);

	local_irq_disable();

	if (signal_pending(current)) {
		local_irq_enable();
		preempt_enable();
		r = -EINTR;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		++vcpu->stat.signal_exits;
		goto out;
	}

	if (irqchip_in_kernel(vcpu->kvm))
		kvm_x86_ops->inject_pending_irq(vcpu);
	else if (!vcpu->mmio_read_completed)
		kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);

	vcpu->guest_mode = 1;
	kvm_guest_enter();

	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			kvm_x86_ops->tlb_flush(vcpu);

	kvm_x86_ops->run(vcpu, kvm_run);

	vcpu->guest_mode = 0;
	local_irq_enable();

	++vcpu->stat.exits;

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow.  The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();

	kvm_guest_exit();

	preempt_enable();

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING)) {
		kvm_x86_ops->cache_regs(vcpu);
		profile_hit(KVM_PROFILING, (void *)vcpu->rip);
	}

	r = kvm_x86_ops->handle_exit(kvm_run, vcpu);

	if (r > 0) {
		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
			r = -EINTR;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.request_irq_exits;
			goto out;
		}
		if (!need_resched()) {
			++vcpu->stat.light_exits;
			goto again;
		}
	}

out:
	if (r > 0) {
		kvm_resched(vcpu);
		goto preempted;
	}

	post_kvm_run_save(vcpu, kvm_run);

	return r;
}


static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (unlikely(vcpu->mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		vcpu_put(vcpu);
		return -EAGAIN;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	/* re-sync apic's tpr */
	if (!irqchip_in_kernel(vcpu->kvm))
		set_cr8(vcpu, kvm_run->cr8);

	if (vcpu->pio.cur_count) {
		r = complete_pio(vcpu);
		if (r)
			goto out;
	}
#if CONFIG_HAS_IOMEM
	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;
		r = emulate_instruction(vcpu, kvm_run,
					vcpu->mmio_fault_cr2, 0, 1);
		if (r == EMULATE_DO_MMIO) {
			/*
			 * Read-modify-write.  Back to userspace.
			 */
			r = 0;
			goto out;
		}
	}
#endif
	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
		kvm_x86_ops->cache_regs(vcpu);
		vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
		kvm_x86_ops->decache_regs(vcpu);
	}

	r = __vcpu_run(vcpu, kvm_run);

out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
				   struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	kvm_x86_ops->cache_regs(vcpu);

	regs->rax = vcpu->regs[VCPU_REGS_RAX];
	regs->rbx = vcpu->regs[VCPU_REGS_RBX];
	regs->rcx = vcpu->regs[VCPU_REGS_RCX];
	regs->rdx = vcpu->regs[VCPU_REGS_RDX];
	regs->rsi = vcpu->regs[VCPU_REGS_RSI];
	regs->rdi = vcpu->regs[VCPU_REGS_RDI];
	regs->rsp = vcpu->regs[VCPU_REGS_RSP];
	regs->rbp = vcpu->regs[VCPU_REGS_RBP];
#ifdef CONFIG_X86_64
	regs->r8 = vcpu->regs[VCPU_REGS_R8];
	regs->r9 = vcpu->regs[VCPU_REGS_R9];
	regs->r10 = vcpu->regs[VCPU_REGS_R10];
	regs->r11 = vcpu->regs[VCPU_REGS_R11];
	regs->r12 = vcpu->regs[VCPU_REGS_R12];
	regs->r13 = vcpu->regs[VCPU_REGS_R13];
	regs->r14 = vcpu->regs[VCPU_REGS_R14];
	regs->r15 = vcpu->regs[VCPU_REGS_R15];
#endif

	regs->rip = vcpu->rip;
	regs->rflags = kvm_x86_ops->get_rflags(vcpu);

	/*
	 * Don't leak debug flags in case they were set for guest debugging
	 */
	if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
		regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);

	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
				   struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	vcpu->regs[VCPU_REGS_RAX] = regs->rax;
	vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
	vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
	vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
	vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
	vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
	vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
	vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
#ifdef CONFIG_X86_64
	vcpu->regs[VCPU_REGS_R8] = regs->r8;
	vcpu->regs[VCPU_REGS_R9] = regs->r9;
	vcpu->regs[VCPU_REGS_R10] = regs->r10;
	vcpu->regs[VCPU_REGS_R11] = regs->r11;
	vcpu->regs[VCPU_REGS_R12] = regs->r12;
	vcpu->regs[VCPU_REGS_R13] = regs->r13;
	vcpu->regs[VCPU_REGS_R14] = regs->r14;
	vcpu->regs[VCPU_REGS_R15] = regs->r15;
#endif

	vcpu->rip = regs->rip;
	kvm_x86_ops->set_rflags(vcpu, regs->rflags);

	kvm_x86_ops->decache_regs(vcpu);

	vcpu_put(vcpu);

	return 0;
}

1993static void get_segment(struct kvm_vcpu *vcpu,
1994 struct kvm_segment *var, int seg)
1995{
cbdd1bea 1996 return kvm_x86_ops->get_segment(vcpu, var, seg);
6aa8b732
AK
1997}
1998
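/*
 * KVM_GET_SREGS: report the segment registers, descriptor tables, control
 * registers and the pending-interrupt bitmap.  With an in-kernel irqchip
 * only the single vector returned by ->get_irq() is reported; otherwise
 * the locally maintained irq_pending bitmap is copied out as-is.
 */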
bccf2150
AK
1999static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2000 struct kvm_sregs *sregs)
6aa8b732 2001{
6aa8b732 2002 struct descriptor_table dt;
2a8067f1 2003 int pending_vec;
6aa8b732 2004
bccf2150 2005 vcpu_load(vcpu);
6aa8b732
AK
2006
2007 get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
2008 get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
2009 get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
2010 get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
2011 get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
2012 get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
2013
2014 get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
2015 get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
2016
cbdd1bea 2017 kvm_x86_ops->get_idt(vcpu, &dt);
6aa8b732
AK
2018 sregs->idt.limit = dt.limit;
2019 sregs->idt.base = dt.base;
cbdd1bea 2020 kvm_x86_ops->get_gdt(vcpu, &dt);
6aa8b732
AK
2021 sregs->gdt.limit = dt.limit;
2022 sregs->gdt.base = dt.base;
2023
cbdd1bea 2024 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
6aa8b732
AK
2025 sregs->cr0 = vcpu->cr0;
2026 sregs->cr2 = vcpu->cr2;
2027 sregs->cr3 = vcpu->cr3;
2028 sregs->cr4 = vcpu->cr4;
7017fc3d 2029 sregs->cr8 = get_cr8(vcpu);
6aa8b732 2030 sregs->efer = vcpu->shadow_efer;
7017fc3d 2031 sregs->apic_base = kvm_get_apic_base(vcpu);
6aa8b732 2032
2a8067f1 2033 if (irqchip_in_kernel(vcpu->kvm)) {
c52fb35a
HQ
2034 memset(sregs->interrupt_bitmap, 0,
2035 sizeof sregs->interrupt_bitmap);
cbdd1bea 2036 pending_vec = kvm_x86_ops->get_irq(vcpu);
2a8067f1 2037 if (pending_vec >= 0)
d77c26fc
MD
2038 set_bit(pending_vec,
2039 (unsigned long *)sregs->interrupt_bitmap);
2a8067f1 2040 } else
c52fb35a
HQ
2041 memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
2042 sizeof sregs->interrupt_bitmap);
6aa8b732
AK
2043
2044 vcpu_put(vcpu);
2045
2046 return 0;
2047}
2048
2049static void set_segment(struct kvm_vcpu *vcpu,
2050 struct kvm_segment *var, int seg)
2051{
cbdd1bea 2052 return kvm_x86_ops->set_segment(vcpu, var, seg);
6aa8b732
AK
2053}
2054
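/*
 * KVM_SET_SREGS: the mirror of the above.  Control register or EFER
 * changes force a kvm_mmu_reset_context(), and the PDPTRs are reloaded
 * when the guest is in PAE but not long mode.
 */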
bccf2150
AK
2055static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2056 struct kvm_sregs *sregs)
6aa8b732 2057{
6aa8b732 2058 int mmu_reset_needed = 0;
2a8067f1 2059 int i, pending_vec, max_bits;
6aa8b732
AK
2060 struct descriptor_table dt;
2061
bccf2150 2062 vcpu_load(vcpu);
6aa8b732 2063
6aa8b732
AK
2064 dt.limit = sregs->idt.limit;
2065 dt.base = sregs->idt.base;
cbdd1bea 2066 kvm_x86_ops->set_idt(vcpu, &dt);
6aa8b732
AK
2067 dt.limit = sregs->gdt.limit;
2068 dt.base = sregs->gdt.base;
cbdd1bea 2069 kvm_x86_ops->set_gdt(vcpu, &dt);
6aa8b732
AK
2070
2071 vcpu->cr2 = sregs->cr2;
2072 mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
2073 vcpu->cr3 = sregs->cr3;
2074
7017fc3d 2075 set_cr8(vcpu, sregs->cr8);
6aa8b732
AK
2076
2077 mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
05b3e0c2 2078#ifdef CONFIG_X86_64
cbdd1bea 2079 kvm_x86_ops->set_efer(vcpu, sregs->efer);
6aa8b732 2080#endif
7017fc3d 2081 kvm_set_apic_base(vcpu, sregs->apic_base);
6aa8b732 2082
cbdd1bea 2083 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
399badf3 2084
6aa8b732 2085 mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
81f50e3b 2086 vcpu->cr0 = sregs->cr0;
cbdd1bea 2087 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
6aa8b732
AK
2088
2089 mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
cbdd1bea 2090 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
1b0973bd
AK
2091 if (!is_long_mode(vcpu) && is_pae(vcpu))
2092 load_pdptrs(vcpu, vcpu->cr3);
6aa8b732
AK
2093
2094 if (mmu_reset_needed)
2095 kvm_mmu_reset_context(vcpu);
2096
c52fb35a
HQ
2097 if (!irqchip_in_kernel(vcpu->kvm)) {
2098 memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
2099 sizeof vcpu->irq_pending);
2100 vcpu->irq_summary = 0;
2101 for (i = 0; i < ARRAY_SIZE(vcpu->irq_pending); ++i)
2102 if (vcpu->irq_pending[i])
2103 __set_bit(i, &vcpu->irq_summary);
2a8067f1
ED
2104 } else {
2105 max_bits = (sizeof sregs->interrupt_bitmap) << 3;
2106 pending_vec = find_first_bit(
2107 (const unsigned long *)sregs->interrupt_bitmap,
2108 max_bits);
2109 /* Only pending external irq is handled here */
2110 if (pending_vec < max_bits) {
cbdd1bea 2111 kvm_x86_ops->set_irq(vcpu, pending_vec);
d77c26fc
MD
2112 pr_debug("Set back pending irq %d\n",
2113 pending_vec);
2a8067f1 2114 }
c52fb35a 2115 }
6aa8b732 2116
024aa1c0
AK
2117 set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
2118 set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
2119 set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
2120 set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
2121 set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
2122 set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
2123
2124 set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
2125 set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
2126
6aa8b732
AK
2127 vcpu_put(vcpu);
2128
2129 return 0;
2130}
2131
1747fb71
RR
2132void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
2133{
2134 struct kvm_segment cs;
2135
2136 get_segment(vcpu, &cs, VCPU_SREG_CS);
2137 *db = cs.db;
2138 *l = cs.l;
2139}
2140EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
2141
6aa8b732
AK
2142/*
2143 * Translate a guest virtual address to a guest physical address.
2144 */
bccf2150
AK
2145static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2146 struct kvm_translation *tr)
6aa8b732
AK
2147{
2148 unsigned long vaddr = tr->linear_address;
6aa8b732
AK
2149 gpa_t gpa;
2150
bccf2150 2151 vcpu_load(vcpu);
11ec2804 2152 mutex_lock(&vcpu->kvm->lock);
6aa8b732
AK
2153 gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
2154 tr->physical_address = gpa;
2155 tr->valid = gpa != UNMAPPED_GVA;
2156 tr->writeable = 1;
2157 tr->usermode = 0;
11ec2804 2158 mutex_unlock(&vcpu->kvm->lock);
6aa8b732
AK
2159 vcpu_put(vcpu);
2160
2161 return 0;
2162}
2163
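/*
 * KVM_INTERRUPT: queue an external interrupt from userspace.  Only valid
 * when the irqchip is emulated in userspace; with an in-kernel irqchip
 * the ioctl fails with -ENXIO.
 */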
bccf2150
AK
2164static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2165 struct kvm_interrupt *irq)
6aa8b732 2166{
6aa8b732
AK
2167 if (irq->irq < 0 || irq->irq >= 256)
2168 return -EINVAL;
97222cc8
ED
2169 if (irqchip_in_kernel(vcpu->kvm))
2170 return -ENXIO;
bccf2150 2171 vcpu_load(vcpu);
6aa8b732
AK
2172
2173 set_bit(irq->irq, vcpu->irq_pending);
2174 set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);
2175
2176 vcpu_put(vcpu);
2177
2178 return 0;
2179}
2180
bccf2150
AK
2181static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
2182 struct kvm_debug_guest *dbg)
6aa8b732 2183{
6aa8b732
AK
2184 int r;
2185
bccf2150 2186 vcpu_load(vcpu);
6aa8b732 2187
cbdd1bea 2188 r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
6aa8b732
AK
2189
2190 vcpu_put(vcpu);
2191
2192 return r;
2193}
2194
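/*
 * Fault handler for the vcpu fd mapping: page 0 of the mapping is the
 * shared kvm_run structure, KVM_PIO_PAGE_OFFSET is the PIO data page.
 */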
9a2bb7f4
AK
2195static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
2196 unsigned long address,
2197 int *type)
2198{
2199 struct kvm_vcpu *vcpu = vma->vm_file->private_data;
2200 unsigned long pgoff;
2201 struct page *page;
2202
9a2bb7f4 2203 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
039576c0
AK
2204 if (pgoff == 0)
2205 page = virt_to_page(vcpu->run);
2206 else if (pgoff == KVM_PIO_PAGE_OFFSET)
2207 page = virt_to_page(vcpu->pio_data);
2208 else
9a2bb7f4 2209 return NOPAGE_SIGBUS;
9a2bb7f4 2210 get_page(page);
cd0d9137
NAQ
2211 if (type != NULL)
2212 *type = VM_FAULT_MINOR;
2213
9a2bb7f4
AK
2214 return page;
2215}
2216
2217static struct vm_operations_struct kvm_vcpu_vm_ops = {
2218 .nopage = kvm_vcpu_nopage,
2219};
2220
2221static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
2222{
2223 vma->vm_ops = &kvm_vcpu_vm_ops;
2224 return 0;
2225}
2226
bccf2150
AK
2227static int kvm_vcpu_release(struct inode *inode, struct file *filp)
2228{
2229 struct kvm_vcpu *vcpu = filp->private_data;
2230
2231 fput(vcpu->kvm->filp);
2232 return 0;
2233}
2234
2235static struct file_operations kvm_vcpu_fops = {
2236 .release = kvm_vcpu_release,
2237 .unlocked_ioctl = kvm_vcpu_ioctl,
2238 .compat_ioctl = kvm_vcpu_ioctl,
9a2bb7f4 2239 .mmap = kvm_vcpu_mmap,
bccf2150
AK
2240};
2241
2242/*
2243 * Allocates an inode for the vcpu.
2244 */
2245static int create_vcpu_fd(struct kvm_vcpu *vcpu)
2246{
2247 int fd, r;
2248 struct inode *inode;
2249 struct file *file;
2250
d6d28168
AK
2251 r = anon_inode_getfd(&fd, &inode, &file,
2252 "kvm-vcpu", &kvm_vcpu_fops, vcpu);
2253 if (r)
2254 return r;
bccf2150 2255 atomic_inc(&vcpu->kvm->filp->f_count);
bccf2150 2256 return fd;
bccf2150
AK
2257}
2258
c5ea7660
AK
2259/*
2260 * Creates some virtual cpus. Good luck creating more than one.
2261 */
2262static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
2263{
2264 int r;
2265 struct kvm_vcpu *vcpu;
2266
c5ea7660 2267 if (!valid_vcpu(n))
fb3f0f51 2268 return -EINVAL;
c5ea7660 2269
cbdd1bea 2270 vcpu = kvm_x86_ops->vcpu_create(kvm, n);
fb3f0f51
RR
2271 if (IS_ERR(vcpu))
2272 return PTR_ERR(vcpu);
c5ea7660 2273
15ad7146
AK
2274 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
2275
b114b080
RR
2276 /* We do fxsave: this must be aligned. */
2277 BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
2278
fb3f0f51 2279 vcpu_load(vcpu);
e00c8cf2
AK
2280 r = kvm_x86_ops->vcpu_reset(vcpu);
2281 if (r == 0)
2282 r = kvm_mmu_setup(vcpu);
c5ea7660 2283 vcpu_put(vcpu);
c5ea7660 2284 if (r < 0)
fb3f0f51
RR
2285 goto free_vcpu;
2286
11ec2804 2287 mutex_lock(&kvm->lock);
fb3f0f51
RR
2288 if (kvm->vcpus[n]) {
2289 r = -EEXIST;
11ec2804 2290 mutex_unlock(&kvm->lock);
fb3f0f51
RR
2291 goto mmu_unload;
2292 }
2293 kvm->vcpus[n] = vcpu;
11ec2804 2294 mutex_unlock(&kvm->lock);
c5ea7660 2295
fb3f0f51 2296 /* Now it's all set up, let userspace reach it */
bccf2150
AK
2297 r = create_vcpu_fd(vcpu);
2298 if (r < 0)
fb3f0f51
RR
2299 goto unlink;
2300 return r;
39c3b86e 2301
fb3f0f51 2302unlink:
11ec2804 2303 mutex_lock(&kvm->lock);
fb3f0f51 2304 kvm->vcpus[n] = NULL;
11ec2804 2305 mutex_unlock(&kvm->lock);
a2fa3e9f 2306
fb3f0f51
RR
2307mmu_unload:
2308 vcpu_load(vcpu);
2309 kvm_mmu_unload(vcpu);
2310 vcpu_put(vcpu);
c5ea7660 2311
fb3f0f51 2312free_vcpu:
cbdd1bea 2313 kvm_x86_ops->vcpu_free(vcpu);
c5ea7660
AK
2314 return r;
2315}
2316
1961d276
AK
2317static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
2318{
2319 if (sigset) {
2320 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2321 vcpu->sigset_active = 1;
2322 vcpu->sigset = *sigset;
2323 } else
2324 vcpu->sigset_active = 0;
2325 return 0;
2326}
2327
b8836737
AK
2328/*
2329 * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
2330 * we have asm/x86/processor.h
2331 */
2332struct fxsave {
2333 u16 cwd;
2334 u16 swd;
2335 u16 twd;
2336 u16 fop;
2337 u64 rip;
2338 u64 rdp;
2339 u32 mxcsr;
2340 u32 mxcsr_mask;
2341 u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
2342#ifdef CONFIG_X86_64
2343 u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
2344#else
2345 u32 xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
2346#endif
2347};
2348
2349static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2350{
b114b080 2351 struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
b8836737
AK
2352
2353 vcpu_load(vcpu);
2354
2355 memcpy(fpu->fpr, fxsave->st_space, 128);
2356 fpu->fcw = fxsave->cwd;
2357 fpu->fsw = fxsave->swd;
2358 fpu->ftwx = fxsave->twd;
2359 fpu->last_opcode = fxsave->fop;
2360 fpu->last_ip = fxsave->rip;
2361 fpu->last_dp = fxsave->rdp;
2362 memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
2363
2364 vcpu_put(vcpu);
2365
2366 return 0;
2367}
2368
2369static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2370{
b114b080 2371 struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
b8836737
AK
2372
2373 vcpu_load(vcpu);
2374
2375 memcpy(fxsave->st_space, fpu->fpr, 128);
2376 fxsave->cwd = fpu->fcw;
2377 fxsave->swd = fpu->fsw;
2378 fxsave->twd = fpu->ftwx;
2379 fxsave->fop = fpu->last_opcode;
2380 fxsave->rip = fpu->last_ip;
2381 fxsave->rdp = fpu->last_dp;
2382 memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
2383
2384 vcpu_put(vcpu);
2385
2386 return 0;
2387}
2388
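/*
 * Dispatcher for the per-vcpu ioctls.  Argument structures are copied
 * across the user/kernel boundary here; anything not handled below is
 * forwarded to kvm_arch_vcpu_ioctl().
 */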
bccf2150
AK
2389static long kvm_vcpu_ioctl(struct file *filp,
2390 unsigned int ioctl, unsigned long arg)
6aa8b732 2391{
bccf2150 2392 struct kvm_vcpu *vcpu = filp->private_data;
2f366987 2393 void __user *argp = (void __user *)arg;
313a3dc7 2394 int r;
6aa8b732
AK
2395
2396 switch (ioctl) {
9a2bb7f4 2397 case KVM_RUN:
f0fe5108
AK
2398 r = -EINVAL;
2399 if (arg)
2400 goto out;
9a2bb7f4 2401 r = kvm_vcpu_ioctl_run(vcpu, vcpu->run);
6aa8b732 2402 break;
6aa8b732
AK
2403 case KVM_GET_REGS: {
2404 struct kvm_regs kvm_regs;
2405
bccf2150
AK
2406 memset(&kvm_regs, 0, sizeof kvm_regs);
2407 r = kvm_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
6aa8b732
AK
2408 if (r)
2409 goto out;
2410 r = -EFAULT;
2f366987 2411 if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
6aa8b732
AK
2412 goto out;
2413 r = 0;
2414 break;
2415 }
2416 case KVM_SET_REGS: {
2417 struct kvm_regs kvm_regs;
2418
2419 r = -EFAULT;
2f366987 2420 if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
6aa8b732 2421 goto out;
bccf2150 2422 r = kvm_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
6aa8b732
AK
2423 if (r)
2424 goto out;
2425 r = 0;
2426 break;
2427 }
2428 case KVM_GET_SREGS: {
2429 struct kvm_sregs kvm_sregs;
2430
bccf2150
AK
2431 memset(&kvm_sregs, 0, sizeof kvm_sregs);
2432 r = kvm_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
6aa8b732
AK
2433 if (r)
2434 goto out;
2435 r = -EFAULT;
2f366987 2436 if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
6aa8b732
AK
2437 goto out;
2438 r = 0;
2439 break;
2440 }
2441 case KVM_SET_SREGS: {
2442 struct kvm_sregs kvm_sregs;
2443
2444 r = -EFAULT;
2f366987 2445 if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
6aa8b732 2446 goto out;
bccf2150 2447 r = kvm_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
6aa8b732
AK
2448 if (r)
2449 goto out;
2450 r = 0;
2451 break;
2452 }
2453 case KVM_TRANSLATE: {
2454 struct kvm_translation tr;
2455
2456 r = -EFAULT;
2f366987 2457 if (copy_from_user(&tr, argp, sizeof tr))
6aa8b732 2458 goto out;
bccf2150 2459 r = kvm_vcpu_ioctl_translate(vcpu, &tr);
6aa8b732
AK
2460 if (r)
2461 goto out;
2462 r = -EFAULT;
2f366987 2463 if (copy_to_user(argp, &tr, sizeof tr))
6aa8b732
AK
2464 goto out;
2465 r = 0;
2466 break;
2467 }
2468 case KVM_INTERRUPT: {
2469 struct kvm_interrupt irq;
2470
2471 r = -EFAULT;
2f366987 2472 if (copy_from_user(&irq, argp, sizeof irq))
6aa8b732 2473 goto out;
bccf2150 2474 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
6aa8b732
AK
2475 if (r)
2476 goto out;
2477 r = 0;
2478 break;
2479 }
2480 case KVM_DEBUG_GUEST: {
2481 struct kvm_debug_guest dbg;
2482
2483 r = -EFAULT;
2f366987 2484 if (copy_from_user(&dbg, argp, sizeof dbg))
6aa8b732 2485 goto out;
bccf2150 2486 r = kvm_vcpu_ioctl_debug_guest(vcpu, &dbg);
6aa8b732
AK
2487 if (r)
2488 goto out;
2489 r = 0;
2490 break;
2491 }
1961d276
AK
2492 case KVM_SET_SIGNAL_MASK: {
2493 struct kvm_signal_mask __user *sigmask_arg = argp;
2494 struct kvm_signal_mask kvm_sigmask;
2495 sigset_t sigset, *p;
2496
2497 p = NULL;
2498 if (argp) {
2499 r = -EFAULT;
2500 if (copy_from_user(&kvm_sigmask, argp,
2501 sizeof kvm_sigmask))
2502 goto out;
2503 r = -EINVAL;
2504 if (kvm_sigmask.len != sizeof sigset)
2505 goto out;
2506 r = -EFAULT;
2507 if (copy_from_user(&sigset, sigmask_arg->sigset,
2508 sizeof sigset))
2509 goto out;
2510 p = &sigset;
2511 }
2512 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
2513 break;
2514 }
b8836737
AK
2515 case KVM_GET_FPU: {
2516 struct kvm_fpu fpu;
2517
2518 memset(&fpu, 0, sizeof fpu);
2519 r = kvm_vcpu_ioctl_get_fpu(vcpu, &fpu);
2520 if (r)
2521 goto out;
2522 r = -EFAULT;
2523 if (copy_to_user(argp, &fpu, sizeof fpu))
2524 goto out;
2525 r = 0;
2526 break;
2527 }
2528 case KVM_SET_FPU: {
2529 struct kvm_fpu fpu;
2530
2531 r = -EFAULT;
2532 if (copy_from_user(&fpu, argp, sizeof fpu))
2533 goto out;
2534 r = kvm_vcpu_ioctl_set_fpu(vcpu, &fpu);
2535 if (r)
2536 goto out;
2537 r = 0;
2538 break;
2539 }
bccf2150 2540 default:
313a3dc7 2541 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
bccf2150
AK
2542 }
2543out:
2544 return r;
2545}
2546
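/*
 * Dispatcher for the per-VM ioctls (vcpu creation, memory slot setup,
 * dirty logging); unknown requests fall through to kvm_arch_vm_ioctl().
 */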
2547static long kvm_vm_ioctl(struct file *filp,
2548 unsigned int ioctl, unsigned long arg)
2549{
2550 struct kvm *kvm = filp->private_data;
2551 void __user *argp = (void __user *)arg;
1fe779f8 2552 int r;
bccf2150
AK
2553
2554 switch (ioctl) {
2555 case KVM_CREATE_VCPU:
2556 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
2557 if (r < 0)
2558 goto out;
2559 break;
6fc138d2
IE
2560 case KVM_SET_USER_MEMORY_REGION: {
2561 struct kvm_userspace_memory_region kvm_userspace_mem;
2562
2563 r = -EFAULT;
2564 if (copy_from_user(&kvm_userspace_mem, argp,
2565 sizeof kvm_userspace_mem))
2566 goto out;
2567
2568 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
6aa8b732
AK
2569 if (r)
2570 goto out;
2571 break;
2572 }
2573 case KVM_GET_DIRTY_LOG: {
2574 struct kvm_dirty_log log;
2575
2576 r = -EFAULT;
2f366987 2577 if (copy_from_user(&log, argp, sizeof log))
6aa8b732 2578 goto out;
2c6f5df9 2579 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
6aa8b732
AK
2580 if (r)
2581 goto out;
2582 break;
2583 }
f17abe9a 2584 default:
1fe779f8 2585 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
f17abe9a
AK
2586 }
2587out:
2588 return r;
2589}
2590
2591static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
2592 unsigned long address,
2593 int *type)
2594{
2595 struct kvm *kvm = vma->vm_file->private_data;
2596 unsigned long pgoff;
f17abe9a
AK
2597 struct page *page;
2598
f17abe9a 2599 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
e0d62c7f
IE
2600 if (!kvm_is_visible_gfn(kvm, pgoff))
2601 return NOPAGE_SIGBUS;
954bbbc2 2602 page = gfn_to_page(kvm, pgoff);
8a7ae055
IE
2603 if (is_error_page(page)) {
2604 kvm_release_page(page);
f17abe9a 2605 return NOPAGE_SIGBUS;
8a7ae055 2606 }
cd0d9137
NAQ
2607 if (type != NULL)
2608 *type = VM_FAULT_MINOR;
2609
f17abe9a
AK
2610 return page;
2611}
2612
2613static struct vm_operations_struct kvm_vm_vm_ops = {
2614 .nopage = kvm_vm_nopage,
2615};
2616
2617static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
2618{
2619 vma->vm_ops = &kvm_vm_vm_ops;
2620 return 0;
2621}
2622
2623static struct file_operations kvm_vm_fops = {
2624 .release = kvm_vm_release,
2625 .unlocked_ioctl = kvm_vm_ioctl,
2626 .compat_ioctl = kvm_vm_ioctl,
2627 .mmap = kvm_vm_mmap,
2628};
2629
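/*
 * KVM_CREATE_VM: allocate a new struct kvm and wrap it in an anonymous
 * inode whose file descriptor is handed back to userspace.
 */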
2630static int kvm_dev_ioctl_create_vm(void)
2631{
2632 int fd, r;
2633 struct inode *inode;
2634 struct file *file;
2635 struct kvm *kvm;
2636
f17abe9a 2637 kvm = kvm_create_vm();
d6d28168
AK
2638 if (IS_ERR(kvm))
2639 return PTR_ERR(kvm);
2640 r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
2641 if (r) {
2642 kvm_destroy_vm(kvm);
2643 return r;
f17abe9a
AK
2644 }
2645
bccf2150 2646 kvm->filp = file;
f17abe9a 2647
f17abe9a 2648 return fd;
f17abe9a
AK
2649}
2650
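/*
 * The /dev/kvm ioctls below, together with the VM and vcpu ioctls above,
 * make up the userspace interface.  A minimal, purely illustrative
 * userspace sequence (error handling and KVM_SET_USER_MEMORY_REGION
 * setup omitted) looks like:
 *
 *	kvm  = open("/dev/kvm", O_RDWR);
 *	ioctl(kvm, KVM_GET_API_VERSION, 0);
 *	vm   = ioctl(kvm, KVM_CREATE_VM, 0);
 *	vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
 *	size = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	run  = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu, 0);
 *	ioctl(vcpu, KVM_RUN, 0);	then examine run->exit_reason
 */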
2651static long kvm_dev_ioctl(struct file *filp,
2652 unsigned int ioctl, unsigned long arg)
2653{
2654 void __user *argp = (void __user *)arg;
07c45a36 2655 long r = -EINVAL;
f17abe9a
AK
2656
2657 switch (ioctl) {
2658 case KVM_GET_API_VERSION:
f0fe5108
AK
2659 r = -EINVAL;
2660 if (arg)
2661 goto out;
f17abe9a
AK
2662 r = KVM_API_VERSION;
2663 break;
2664 case KVM_CREATE_VM:
f0fe5108
AK
2665 r = -EINVAL;
2666 if (arg)
2667 goto out;
f17abe9a
AK
2668 r = kvm_dev_ioctl_create_vm();
2669 break;
85f455f7
ED
2670 case KVM_CHECK_EXTENSION: {
2671 int ext = (long)argp;
2672
2673 switch (ext) {
2674 case KVM_CAP_IRQCHIP:
b6958ce4 2675 case KVM_CAP_HLT:
82ce2c96 2676 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
6fc138d2 2677 case KVM_CAP_USER_MEMORY:
cbc94022 2678 case KVM_CAP_SET_TSS_ADDR:
85f455f7
ED
2679 r = 1;
2680 break;
2681 default:
2682 r = 0;
2683 break;
2684 }
5d308f45 2685 break;
85f455f7 2686 }
07c45a36
AK
2687 case KVM_GET_VCPU_MMAP_SIZE:
2688 r = -EINVAL;
2689 if (arg)
2690 goto out;
039576c0 2691 r = 2 * PAGE_SIZE;
07c45a36 2692 break;
6aa8b732 2693 default:
043405e1 2694 return kvm_arch_dev_ioctl(filp, ioctl, arg);
6aa8b732
AK
2695 }
2696out:
2697 return r;
2698}
2699
6aa8b732 2700static struct file_operations kvm_chardev_ops = {
6aa8b732
AK
2701 .unlocked_ioctl = kvm_dev_ioctl,
2702 .compat_ioctl = kvm_dev_ioctl,
6aa8b732
AK
2703};
2704
2705static struct miscdevice kvm_dev = {
bbe4432e 2706 KVM_MINOR,
6aa8b732
AK
2707 "kvm",
2708 &kvm_chardev_ops,
2709};
2710
774c47f1
AK
2711/*
2712 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
2713 * cached on it.
2714 */
2715static void decache_vcpus_on_cpu(int cpu)
2716{
2717 struct kvm *vm;
2718 struct kvm_vcpu *vcpu;
2719 int i;
2720
2721 spin_lock(&kvm_lock);
11ec2804 2722 list_for_each_entry(vm, &vm_list, vm_list)
774c47f1 2723 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
fb3f0f51
RR
2724 vcpu = vm->vcpus[i];
2725 if (!vcpu)
2726 continue;
774c47f1
AK
2727 /*
2728 * If the vcpu is locked, then it is running on some
2729 * other cpu and therefore it is not cached on the
2730 * cpu in question.
2731 *
2732 * If it's not locked, check the last cpu it executed
2733 * on.
2734 */
2735 if (mutex_trylock(&vcpu->mutex)) {
2736 if (vcpu->cpu == cpu) {
cbdd1bea 2737 kvm_x86_ops->vcpu_decache(vcpu);
774c47f1
AK
2738 vcpu->cpu = -1;
2739 }
2740 mutex_unlock(&vcpu->mutex);
2741 }
2742 }
2743 spin_unlock(&kvm_lock);
2744}
2745
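/*
 * Per-cpu enable/disable of the hardware virtualization extensions.
 * cpus_hardware_enabled tracks which cpus have already been switched on
 * so that the hotplug, suspend/resume and reboot paths stay idempotent.
 */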
1b6c0168
AK
2746static void hardware_enable(void *junk)
2747{
2748 int cpu = raw_smp_processor_id();
2749
2750 if (cpu_isset(cpu, cpus_hardware_enabled))
2751 return;
2752 cpu_set(cpu, cpus_hardware_enabled);
cbdd1bea 2753 kvm_x86_ops->hardware_enable(NULL);
1b6c0168
AK
2754}
2755
2756static void hardware_disable(void *junk)
2757{
2758 int cpu = raw_smp_processor_id();
2759
2760 if (!cpu_isset(cpu, cpus_hardware_enabled))
2761 return;
2762 cpu_clear(cpu, cpus_hardware_enabled);
2763 decache_vcpus_on_cpu(cpu);
cbdd1bea 2764 kvm_x86_ops->hardware_disable(NULL);
1b6c0168
AK
2765}
2766
774c47f1
AK
2767static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2768 void *v)
2769{
2770 int cpu = (long)v;
2771
2772 switch (val) {
cec9ad27
AK
2773 case CPU_DYING:
2774 case CPU_DYING_FROZEN:
6ec8a856
AK
2775 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2776 cpu);
2777 hardware_disable(NULL);
2778 break;
774c47f1 2779 case CPU_UP_CANCELED:
8bb78442 2780 case CPU_UP_CANCELED_FROZEN:
43934a38
JK
2781 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2782 cpu);
1b6c0168 2783 smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
774c47f1 2784 break;
43934a38 2785 case CPU_ONLINE:
8bb78442 2786 case CPU_ONLINE_FROZEN:
43934a38
JK
2787 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
2788 cpu);
1b6c0168 2789 smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
774c47f1
AK
2790 break;
2791 }
2792 return NOTIFY_OK;
2793}
2794
9a2b85c6 2795static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
d77c26fc 2796 void *v)
9a2b85c6
RR
2797{
2798 if (val == SYS_RESTART) {
2799 /*
2800 * Some (well, at least mine) BIOSes hang on reboot if
2801 * in vmx root mode.
2802 */
2803 printk(KERN_INFO "kvm: exiting hardware virtualization\n");
2804 on_each_cpu(hardware_disable, NULL, 0, 1);
2805 }
2806 return NOTIFY_OK;
2807}
2808
2809static struct notifier_block kvm_reboot_notifier = {
2810 .notifier_call = kvm_reboot,
2811 .priority = 0,
2812};
2813
2eeb2e94
GH
2814void kvm_io_bus_init(struct kvm_io_bus *bus)
2815{
2816 memset(bus, 0, sizeof(*bus));
2817}
2818
2819void kvm_io_bus_destroy(struct kvm_io_bus *bus)
2820{
2821 int i;
2822
2823 for (i = 0; i < bus->dev_count; i++) {
2824 struct kvm_io_device *pos = bus->devs[i];
2825
2826 kvm_iodevice_destructor(pos);
2827 }
2828}
2829
2830struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
2831{
2832 int i;
2833
2834 for (i = 0; i < bus->dev_count; i++) {
2835 struct kvm_io_device *pos = bus->devs[i];
2836
2837 if (pos->in_range(pos, addr))
2838 return pos;
2839 }
2840
2841 return NULL;
2842}
2843
2844void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
2845{
2846 BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));
2847
2848 bus->devs[bus->dev_count++] = dev;
2849}
2850
774c47f1
AK
2851static struct notifier_block kvm_cpu_notifier = {
2852 .notifier_call = kvm_cpu_hotplug,
2853 .priority = 20, /* must be > scheduler priority */
2854};
2855
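/*
 * debugfs accessor: _offset is the byte offset of one counter inside
 * struct kvm_vcpu (see debugfs_entries[]); the value reported is the sum
 * of that counter over every vcpu of every VM.
 */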
1165f5fe
AK
2856static u64 stat_get(void *_offset)
2857{
2858 unsigned offset = (long)_offset;
2859 u64 total = 0;
2860 struct kvm *kvm;
2861 struct kvm_vcpu *vcpu;
2862 int i;
2863
2864 spin_lock(&kvm_lock);
2865 list_for_each_entry(kvm, &vm_list, vm_list)
2866 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
fb3f0f51
RR
2867 vcpu = kvm->vcpus[i];
2868 if (vcpu)
2869 total += *(u32 *)((void *)vcpu + offset);
1165f5fe
AK
2870 }
2871 spin_unlock(&kvm_lock);
2872 return total;
2873}
2874
3dea7ca7 2875DEFINE_SIMPLE_ATTRIBUTE(stat_fops, stat_get, NULL, "%llu\n");
1165f5fe 2876
6aa8b732
AK
2877static __init void kvm_init_debug(void)
2878{
2879 struct kvm_stats_debugfs_item *p;
2880
8b6d44c7 2881 debugfs_dir = debugfs_create_dir("kvm", NULL);
6aa8b732 2882 for (p = debugfs_entries; p->name; ++p)
1165f5fe
AK
2883 p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
2884 (void *)(long)p->offset,
2885 &stat_fops);
6aa8b732
AK
2886}
2887
2888static void kvm_exit_debug(void)
2889{
2890 struct kvm_stats_debugfs_item *p;
2891
2892 for (p = debugfs_entries; p->name; ++p)
2893 debugfs_remove(p->dentry);
2894 debugfs_remove(debugfs_dir);
2895}
2896
59ae6c6b
AK
2897static int kvm_suspend(struct sys_device *dev, pm_message_t state)
2898{
4267c41a 2899 hardware_disable(NULL);
59ae6c6b
AK
2900 return 0;
2901}
2902
2903static int kvm_resume(struct sys_device *dev)
2904{
4267c41a 2905 hardware_enable(NULL);
59ae6c6b
AK
2906 return 0;
2907}
2908
2909static struct sysdev_class kvm_sysdev_class = {
af5ca3f4 2910 .name = "kvm",
59ae6c6b
AK
2911 .suspend = kvm_suspend,
2912 .resume = kvm_resume,
2913};
2914
2915static struct sys_device kvm_sysdev = {
2916 .id = 0,
2917 .cls = &kvm_sysdev_class,
2918};
2919
cea7bb21 2920struct page *bad_page;
6aa8b732 2921
15ad7146
AK
2922static inline
2923struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
2924{
2925 return container_of(pn, struct kvm_vcpu, preempt_notifier);
2926}
2927
2928static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
2929{
2930 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
2931
cbdd1bea 2932 kvm_x86_ops->vcpu_load(vcpu, cpu);
15ad7146
AK
2933}
2934
2935static void kvm_sched_out(struct preempt_notifier *pn,
2936 struct task_struct *next)
2937{
2938 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
2939
cbdd1bea 2940 kvm_x86_ops->vcpu_put(vcpu);
15ad7146
AK
2941}
2942
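/*
 * Module initialization, called by the hardware-specific module (vmx or
 * svm) with its kvm_x86_ops: check hardware support, enable
 * virtualization on all online cpus, and register the cpu-hotplug,
 * reboot, sysdev and misc device hooks.
 */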
cbdd1bea 2943int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
c16f862d 2944 struct module *module)
6aa8b732
AK
2945{
2946 int r;
002c7f7c 2947 int cpu;
6aa8b732 2948
cbdd1bea 2949 if (kvm_x86_ops) {
09db28b8
YI
2950 printk(KERN_ERR "kvm: already loaded the other module\n");
2951 return -EEXIST;
2952 }
2953
e097f35c 2954 if (!ops->cpu_has_kvm_support()) {
6aa8b732
AK
2955 printk(KERN_ERR "kvm: no hardware support\n");
2956 return -EOPNOTSUPP;
2957 }
e097f35c 2958 if (ops->disabled_by_bios()) {
6aa8b732
AK
2959 printk(KERN_ERR "kvm: disabled by bios\n");
2960 return -EOPNOTSUPP;
2961 }
2962
cbdd1bea 2963 kvm_x86_ops = ops;
e097f35c 2964
cbdd1bea 2965 r = kvm_x86_ops->hardware_setup();
6aa8b732 2966 if (r < 0)
ca45aaae 2967 goto out;
6aa8b732 2968
002c7f7c
YS
2969 for_each_online_cpu(cpu) {
2970 smp_call_function_single(cpu,
cbdd1bea 2971 kvm_x86_ops->check_processor_compatibility,
002c7f7c
YS
2972 &r, 0, 1);
2973 if (r < 0)
2974 goto out_free_0;
2975 }
2976
1b6c0168 2977 on_each_cpu(hardware_enable, NULL, 0, 1);
774c47f1
AK
2978 r = register_cpu_notifier(&kvm_cpu_notifier);
2979 if (r)
2980 goto out_free_1;
6aa8b732
AK
2981 register_reboot_notifier(&kvm_reboot_notifier);
2982
59ae6c6b
AK
2983 r = sysdev_class_register(&kvm_sysdev_class);
2984 if (r)
2985 goto out_free_2;
2986
2987 r = sysdev_register(&kvm_sysdev);
2988 if (r)
2989 goto out_free_3;
2990
c16f862d
RR
2991 /* A kmem cache lets us meet the alignment requirements of fx_save. */
2992 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
2993 __alignof__(struct kvm_vcpu), 0, 0);
2994 if (!kvm_vcpu_cache) {
2995 r = -ENOMEM;
2996 goto out_free_4;
2997 }
2998
6aa8b732
AK
2999 kvm_chardev_ops.owner = module;
3000
3001 r = misc_register(&kvm_dev);
3002 if (r) {
d77c26fc 3003 printk(KERN_ERR "kvm: misc device register failed\n");
6aa8b732
AK
3004 goto out_free;
3005 }
3006
15ad7146
AK
3007 kvm_preempt_ops.sched_in = kvm_sched_in;
3008 kvm_preempt_ops.sched_out = kvm_sched_out;
3009
c7addb90
AK
3010 kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
3011
3012 return 0;
6aa8b732
AK
3013
3014out_free:
c16f862d
RR
3015 kmem_cache_destroy(kvm_vcpu_cache);
3016out_free_4:
59ae6c6b
AK
3017 sysdev_unregister(&kvm_sysdev);
3018out_free_3:
3019 sysdev_class_unregister(&kvm_sysdev_class);
3020out_free_2:
6aa8b732 3021 unregister_reboot_notifier(&kvm_reboot_notifier);
774c47f1
AK
3022 unregister_cpu_notifier(&kvm_cpu_notifier);
3023out_free_1:
1b6c0168 3024 on_each_cpu(hardware_disable, NULL, 0, 1);
002c7f7c 3025out_free_0:
cbdd1bea 3026 kvm_x86_ops->hardware_unsetup();
ca45aaae 3027out:
cbdd1bea 3028 kvm_x86_ops = NULL;
6aa8b732
AK
3029 return r;
3030}
d77c26fc 3031EXPORT_SYMBOL_GPL(kvm_init_x86);
6aa8b732 3032
cbdd1bea 3033void kvm_exit_x86(void)
6aa8b732
AK
3034{
3035 misc_deregister(&kvm_dev);
c16f862d 3036 kmem_cache_destroy(kvm_vcpu_cache);
59ae6c6b
AK
3037 sysdev_unregister(&kvm_sysdev);
3038 sysdev_class_unregister(&kvm_sysdev_class);
6aa8b732 3039 unregister_reboot_notifier(&kvm_reboot_notifier);
59ae6c6b 3040 unregister_cpu_notifier(&kvm_cpu_notifier);
1b6c0168 3041 on_each_cpu(hardware_disable, NULL, 0, 1);
cbdd1bea
CE
3042 kvm_x86_ops->hardware_unsetup();
3043 kvm_x86_ops = NULL;
6aa8b732 3044}
d77c26fc 3045EXPORT_SYMBOL_GPL(kvm_exit_x86);
6aa8b732
AK
3046
3047static __init int kvm_init(void)
3048{
37e29d90
AK
3049 int r;
3050
b5a33a75
AK
3051 r = kvm_mmu_module_init();
3052 if (r)
3053 goto out4;
3054
6aa8b732
AK
3055 kvm_init_debug();
3056
043405e1 3057 kvm_arch_init();
bf591b24 3058
cea7bb21 3059 bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
d77c26fc
MD
3060
3061 if (bad_page == NULL) {
6aa8b732
AK
3062 r = -ENOMEM;
3063 goto out;
3064 }
3065
58e690e6 3066 return 0;
6aa8b732
AK
3067
3068out:
3069 kvm_exit_debug();
b5a33a75
AK
3070 kvm_mmu_module_exit();
3071out4:
6aa8b732
AK
3072 return r;
3073}
3074
3075static __exit void kvm_exit(void)
3076{
3077 kvm_exit_debug();
cea7bb21 3078 __free_page(bad_page);
b5a33a75 3079 kvm_mmu_module_exit();
6aa8b732
AK
3080}
3081
3082module_init(kvm_init)
3083module_exit(kvm_exit)