/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * Ordering of locks:
 *
 *	kvm->slots_lock --> kvm->lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                           unsigned long arg);
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static bool kvm_rebooting;

static bool largepages_enabled = true;

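/*
 * A pfn is treated as MMIO if it either has no struct page at all, or if
 * its compound head page is marked reserved (e.g. a device mapping).
 * Such pfns have no regular refcounting and must not be passed to
 * get_page()/put_page() by the helpers further down in this file.
 */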
inline int kvm_is_mmio_pfn(pfn_t pfn)
{
        if (pfn_valid(pfn)) {
                struct page *page = compound_head(pfn_to_page(pfn));
                return PageReserved(page);
        }

        return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
        int cpu;

        mutex_lock(&vcpu->mutex);
        cpu = get_cpu();
        preempt_notifier_register(&vcpu->preempt_notifier);
        kvm_arch_vcpu_load(vcpu, cpu);
        put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        kvm_arch_vcpu_put(vcpu);
        preempt_notifier_unregister(&vcpu->preempt_notifier);
        preempt_enable();
        mutex_unlock(&vcpu->mutex);
}

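/*
 * Raise a request bit on every vcpu and kick the cpus currently running
 * a vcpu with an IPI so they notice the request promptly.  ack_flush()
 * is intentionally empty: the final argument of smp_call_function_many()
 * makes the caller wait until every target cpu has run the handler,
 * which is all the serialization we need.
 */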
static void ack_flush(void *_completed)
{
}

static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
        int i, cpu, me;
        cpumask_var_t cpus;
        bool called = true;
        struct kvm_vcpu *vcpu;

        zalloc_cpumask_var(&cpus, GFP_ATOMIC);

        spin_lock(&kvm->requests_lock);
        me = smp_processor_id();
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (test_and_set_bit(req, &vcpu->requests))
                        continue;
                cpu = vcpu->cpu;
                if (cpus != NULL && cpu != -1 && cpu != me)
                        cpumask_set_cpu(cpu, cpus);
        }
        if (unlikely(cpus == NULL))
                smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
        else if (!cpumask_empty(cpus))
                smp_call_function_many(cpus, ack_flush, NULL, 1);
        else
                called = false;
        spin_unlock(&kvm->requests_lock);
        free_cpumask_var(cpus);
        return called;
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
        if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
                ++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
        make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
        struct page *page;
        int r;

        mutex_init(&vcpu->mutex);
        vcpu->cpu = -1;
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
        init_waitqueue_head(&vcpu->wq);

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto fail;
        }
        vcpu->run = page_address(page);

        r = kvm_arch_vcpu_init(vcpu);
        if (r < 0)
                goto fail_free_run;
        return 0;

fail_free_run:
        free_page((unsigned long)vcpu->run);
fail:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_uninit(vcpu);
        free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
        return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
                                             struct mm_struct *mm,
                                             unsigned long address)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int need_tlb_flush;

        /*
         * When ->invalidate_page runs, the linux pte has been zapped
         * already but the page is still allocated until
         * ->invalidate_page returns.  So if we increase the sequence
         * here the kvm page fault will notice if the spte can't be
         * established because the page is going to be freed.  If
         * instead the kvm page fault establishes the spte before
         * ->invalidate_page runs, kvm_unmap_hva will release it
         * before returning.
         *
         * The sequence increase only needs to be seen at spin_unlock
         * time, and not at spin_lock time.
         *
         * Increasing the sequence after the spin_unlock would be
         * unsafe because the kvm page fault could then establish the
         * pte after kvm_unmap_hva returned, without noticing the page
         * is going to be freed.
         */
        spin_lock(&kvm->mmu_lock);
        kvm->mmu_notifier_seq++;
        need_tlb_flush = kvm_unmap_hva(kvm, address);
        spin_unlock(&kvm->mmu_lock);

        /* we have to flush the tlb before the pages can be freed */
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
                                        struct mm_struct *mm,
                                        unsigned long address,
                                        pte_t pte)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);

        spin_lock(&kvm->mmu_lock);
        kvm->mmu_notifier_seq++;
        kvm_set_spte_hva(kvm, address, pte);
        spin_unlock(&kvm->mmu_lock);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                                    struct mm_struct *mm,
                                                    unsigned long start,
                                                    unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int need_tlb_flush = 0;

        spin_lock(&kvm->mmu_lock);
        /*
         * The count increase must become visible at unlock time as no
         * spte can be established without taking the mmu_lock and
         * count is also read inside the mmu_lock critical section.
         */
        kvm->mmu_notifier_count++;
        for (; start < end; start += PAGE_SIZE)
                need_tlb_flush |= kvm_unmap_hva(kvm, start);
        spin_unlock(&kvm->mmu_lock);

        /* we have to flush the tlb before the pages can be freed */
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
                                                  struct mm_struct *mm,
                                                  unsigned long start,
                                                  unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);

        spin_lock(&kvm->mmu_lock);
        /*
         * This sequence increase will notify the kvm page fault that
         * the page that is going to be mapped in the spte could have
         * been freed.
         */
        kvm->mmu_notifier_seq++;
        /*
         * The above sequence increase must be visible before the
         * below count decrease, but both values are read by the kvm
         * page fault under the mmu_lock spinlock, so we don't need to
         * add an smp_wmb() here in between the two.
         */
        kvm->mmu_notifier_count--;
        spin_unlock(&kvm->mmu_lock);

        BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
                                              struct mm_struct *mm,
                                              unsigned long address)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int young;

        spin_lock(&kvm->mmu_lock);
        young = kvm_age_hva(kvm, address);
        spin_unlock(&kvm->mmu_lock);

        if (young)
                kvm_flush_remote_tlbs(kvm);

        return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
                                     struct mm_struct *mm)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        kvm_arch_flush_shadow(kvm);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
        .invalidate_page        = kvm_mmu_notifier_invalidate_page,
        .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
        .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
        .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
        .change_pte             = kvm_mmu_notifier_change_pte,
        .release                = kvm_mmu_notifier_release,
};
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

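/*
 * Create and register a new VM: enable hardware virtualization on first
 * use, set up the coalesced MMIO ring and the mmu notifier (where
 * configured), pin the creating mm, and link the VM into vm_list.
 */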
static struct kvm *kvm_create_vm(void)
{
        int r = 0;
        struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        struct page *page;
#endif

        if (IS_ERR(kvm))
                goto out;

        r = hardware_enable_all();
        if (r)
                goto out_err_nodisable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
        INIT_HLIST_HEAD(&kvm->mask_notifier_list);
        INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto out_err;
        }
        kvm->coalesced_mmio_ring =
                        (struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        {
                kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
                r = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
                if (r) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
                        put_page(page);
#endif
                        goto out_err;
                }
        }
#endif

        kvm->mm = current->mm;
        atomic_inc(&kvm->mm->mm_count);
        spin_lock_init(&kvm->mmu_lock);
        spin_lock_init(&kvm->requests_lock);
        kvm_io_bus_init(&kvm->pio_bus);
        kvm_eventfd_init(kvm);
        mutex_init(&kvm->lock);
        mutex_init(&kvm->irq_lock);
        kvm_io_bus_init(&kvm->mmio_bus);
        init_rwsem(&kvm->slots_lock);
        atomic_set(&kvm->users_count, 1);
        spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
        spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        kvm_coalesced_mmio_init(kvm);
#endif
out:
        return kvm;

out_err:
        hardware_disable_all();
out_err_nodisable:
        kfree(kvm);
        return ERR_PTR(r);
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont)
{
        int i;

        if (!dont || free->rmap != dont->rmap)
                vfree(free->rmap);

        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                vfree(free->dirty_bitmap);

        for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
                if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
                        vfree(free->lpage_info[i]);
                        free->lpage_info[i] = NULL;
                }
        }

        free->npages = 0;
        free->dirty_bitmap = NULL;
        free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i)
                kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
        struct mm_struct *mm = kvm->mm;

        kvm_arch_sync_events(kvm);
        spin_lock(&kvm_lock);
        list_del(&kvm->vm_list);
        spin_unlock(&kvm_lock);
        kvm_free_irq_routing(kvm);
        kvm_io_bus_destroy(&kvm->pio_bus);
        kvm_io_bus_destroy(&kvm->mmio_bus);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        if (kvm->coalesced_mmio_ring != NULL)
                free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
        kvm_arch_flush_shadow(kvm);
#endif
        kvm_arch_destroy_vm(kvm);
        hardware_disable_all();
        mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
        atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
        if (atomic_dec_and_test(&kvm->users_count))
                kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
        struct kvm *kvm = filp->private_data;

        kvm_irqfd_release(kvm);

        kvm_put_kvm(kvm);
        return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->slots_lock for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc)
{
        int r;
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long i;
        struct kvm_memory_slot *memslot;
        struct kvm_memory_slot old, new;

        r = -EINVAL;
        /* General sanity checks */
        if (mem->memory_size & (PAGE_SIZE - 1))
                goto out;
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
        if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
                goto out;
        if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
                goto out;
        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                goto out;

        memslot = &kvm->memslots[mem->slot];
        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
        npages = mem->memory_size >> PAGE_SHIFT;

        if (!npages)
                mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

        new = old = *memslot;

        new.base_gfn = base_gfn;
        new.npages = npages;
        new.flags = mem->flags;

        /* Disallow changing a memory slot's size. */
        r = -EINVAL;
        if (npages && old.npages && npages != old.npages)
                goto out_free;

        /* Check for overlaps */
        r = -EEXIST;
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *s = &kvm->memslots[i];

                if (s == memslot || !s->npages)
                        continue;
                if (!((base_gfn + npages <= s->base_gfn) ||
                      (base_gfn >= s->base_gfn + s->npages)))
                        goto out_free;
        }

        /* Free page dirty bitmap if unneeded */
        if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
                new.dirty_bitmap = NULL;

        r = -ENOMEM;

        /* Allocate if a slot is being created */
#ifndef CONFIG_S390
        if (npages && !new.rmap) {
                new.rmap = vmalloc(npages * sizeof(struct page *));

                if (!new.rmap)
                        goto out_free;

                memset(new.rmap, 0, npages * sizeof(*new.rmap));

                new.user_alloc = user_alloc;
                /*
                 * hva_to_rmmap() serializes with the mmu_lock and to be
                 * safe it has to ignore memslots with !user_alloc &&
                 * !userspace_addr.
                 */
                if (user_alloc)
                        new.userspace_addr = mem->userspace_addr;
                else
                        new.userspace_addr = 0;
        }
        if (!npages)
                goto skip_lpage;

        for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
                unsigned long ugfn;
                unsigned long j;
                int lpages;
                int level = i + 2;

                /* Avoid unused variable warning if no large pages */
                (void)level;

                if (new.lpage_info[i])
                        continue;

                lpages = 1 + (base_gfn + npages - 1) /
                             KVM_PAGES_PER_HPAGE(level);
                lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);

                new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));

                if (!new.lpage_info[i])
                        goto out_free;

                memset(new.lpage_info[i], 0,
                       lpages * sizeof(*new.lpage_info[i]));

                if (base_gfn % KVM_PAGES_PER_HPAGE(level))
                        new.lpage_info[i][0].write_count = 1;
                if ((base_gfn + npages) % KVM_PAGES_PER_HPAGE(level))
                        new.lpage_info[i][lpages - 1].write_count = 1;
                ugfn = new.userspace_addr >> PAGE_SHIFT;
                /*
                 * If the gfn and userspace address are not aligned wrt each
                 * other, or if explicitly asked to, disable large page
                 * support for this slot
                 */
                if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
                    !largepages_enabled)
                        for (j = 0; j < lpages; ++j)
                                new.lpage_info[i][j].write_count = 1;
        }

skip_lpage:

        /* Allocate page dirty bitmap if needed */
        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
                unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

                new.dirty_bitmap = vmalloc(dirty_bytes);
                if (!new.dirty_bitmap)
                        goto out_free;
                memset(new.dirty_bitmap, 0, dirty_bytes);
                if (old.npages)
                        kvm_arch_flush_shadow(kvm);
        }
#else  /* not defined CONFIG_S390 */
        new.user_alloc = user_alloc;
        if (user_alloc)
                new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */

        if (!npages)
                kvm_arch_flush_shadow(kvm);

        spin_lock(&kvm->mmu_lock);
        if (mem->slot >= kvm->nmemslots)
                kvm->nmemslots = mem->slot + 1;

        *memslot = new;
        spin_unlock(&kvm->mmu_lock);

        r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
        if (r) {
                spin_lock(&kvm->mmu_lock);
                *memslot = old;
                spin_unlock(&kvm->mmu_lock);
                goto out_free;
        }

        kvm_free_physmem_slot(&old, npages ? &new : NULL);
        /* Slot deletion case: we have to update the current slot */
        spin_lock(&kvm->mmu_lock);
        if (!npages)
                *memslot = old;
        spin_unlock(&kvm->mmu_lock);
#ifdef CONFIG_DMAR
        /* map the pages in iommu page table */
        r = kvm_iommu_map_pages(kvm, base_gfn, npages);
        if (r)
                goto out;
#endif
        return 0;

out_free:
        kvm_free_physmem_slot(&new, &old);
out:
        return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

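/*
 * Illustrative caller's view (a sketch, not part of this file): userspace
 * normally reaches __kvm_set_memory_region() through the
 * KVM_SET_USER_MEMORY_REGION vm ioctl, e.g. with a hypothetical VM fd
 * `vmfd` and a page-aligned buffer `mem` of `size` bytes:
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.flags           = KVM_MEM_LOG_DIRTY_PAGES,
 *		.guest_phys_addr = 0x100000,
 *		.memory_size     = size,
 *		.userspace_addr  = (__u64)(unsigned long)mem,
 *	};
 *	ioctl(vmfd, KVM_SET_USER_MEMORY_REGION, &region);
 */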
int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
                          int user_alloc)
{
        int r;

        down_write(&kvm->slots_lock);
        r = __kvm_set_memory_region(kvm, mem, user_alloc);
        up_write(&kvm->slots_lock);
        return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        if (mem->slot >= KVM_MEMORY_SLOTS)
                return -EINVAL;
        return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
                      struct kvm_dirty_log *log, int *is_dirty)
{
        struct kvm_memory_slot *memslot;
        int r, i;
        int n;
        unsigned long any = 0;

        r = -EINVAL;
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;

        memslot = &kvm->memslots[log->slot];
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

        for (i = 0; !any && i < n/sizeof(long); ++i)
                any = memslot->dirty_bitmap[i];

        r = -EFAULT;
        if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
                goto out;

        if (any)
                *is_dirty = 1;

        r = 0;
out:
        return r;
}

void kvm_disable_largepages(void)
{
        largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

int is_error_page(struct page *page)
{
        return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
        return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
        return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
        return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return memslot;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        gfn = unalias_gfn(kvm, gfn);
        return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
        int i;

        gfn = unalias_gfn(kvm, gfn);
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;

        gfn = unalias_gfn(kvm, gfn);
        slot = gfn_to_memslot_unaliased(kvm, gfn);
        if (!slot)
                return bad_hva();
        return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

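/*
 * Translate a guest frame number to a host pfn in two steps: gfn -> hva
 * via the memslots, then hva -> pfn via get_user_pages_fast().  When the
 * hva lies in a VM_PFNMAP vma (device memory without struct pages), the
 * pfn is computed directly from the vma instead.
 */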
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
        struct page *page[1];
        unsigned long addr;
        int npages;
        pfn_t pfn;

        might_sleep();

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr)) {
                get_page(bad_page);
                return page_to_pfn(bad_page);
        }

        npages = get_user_pages_fast(addr, 1, 1, page);

        if (unlikely(npages != 1)) {
                struct vm_area_struct *vma;

                down_read(&current->mm->mmap_sem);
                vma = find_vma(current->mm, addr);

                if (vma == NULL || addr < vma->vm_start ||
                    !(vma->vm_flags & VM_PFNMAP)) {
                        up_read(&current->mm->mmap_sem);
                        get_page(bad_page);
                        return page_to_pfn(bad_page);
                }

                pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
                up_read(&current->mm->mmap_sem);
                BUG_ON(!kvm_is_mmio_pfn(pfn));
        } else
                pfn = page_to_pfn(page[0]);

        return pfn;
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
        pfn_t pfn;

        pfn = gfn_to_pfn(kvm, gfn);
        if (!kvm_is_mmio_pfn(pfn))
                return pfn_to_page(pfn);

        WARN_ON(kvm_is_mmio_pfn(pfn));

        get_page(bad_page);
        return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
        kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
        if (!kvm_is_mmio_pfn(pfn))
                put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
        kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
        kvm_set_pfn_dirty(pfn);
        kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
        kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
        if (!kvm_is_mmio_pfn(pfn)) {
                struct page *page = pfn_to_page(pfn);
                if (!PageReserved(page))
                        SetPageDirty(page);
        }
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
        if (!kvm_is_mmio_pfn(pfn))
                mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
        if (!kvm_is_mmio_pfn(pfn))
                get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

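/*
 * Guest memory copy helpers.  A gpa range may span several memslots and
 * host pages, so kvm_read_guest()/kvm_write_guest() split the copy into
 * per-page segments and translate each gfn separately; next_segment()
 * returns how many bytes of the current page are still usable.
 */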
static int next_segment(unsigned long len, int offset)
{
        if (len > PAGE_SIZE - offset)
                return PAGE_SIZE - offset;
        else
                return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = copy_from_user(data, (void __user *)addr + offset, len);
        if (r)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len)
{
        int r;
        unsigned long addr;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int offset = offset_in_page(gpa);

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        pagefault_disable();
        r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
        pagefault_enable();
        if (r)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = copy_to_user((void __user *)addr + offset, data, len);
        if (r)
                return -EFAULT;
        mark_page_dirty(kvm, gfn);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
        return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        gfn = unalias_gfn(kvm, gfn);
        memslot = gfn_to_memslot_unaliased(kvm, gfn);
        if (memslot && memslot->dirty_bitmap) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;

                /* avoid RMW */
                if (!test_bit(rel_gfn, memslot->dirty_bitmap))
                        set_bit(rel_gfn, memslot->dirty_bitmap);
        }
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

                if (kvm_arch_vcpu_runnable(vcpu)) {
                        set_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        break;
                }
                if (kvm_cpu_has_pending_timer(vcpu))
                        break;
                if (signal_pending(current))
                        break;

                schedule();
        }

        finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
        if (!need_resched())
                return;
        cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
{
        ktime_t expires;
        DEFINE_WAIT(wait);

        prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

        /* Sleep for 100 us, and hope lock-holder got scheduled */
        expires = ktime_add_ns(ktime_get(), 100000UL);
        schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);

        finish_wait(&vcpu->wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);

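/*
 * Back the vcpu file's mmap with kernel pages: offset 0 maps the
 * struct kvm_run page, followed (per arch/config) by the pio data page
 * and the coalesced MMIO ring page.  The layout matches what
 * KVM_GET_VCPU_MMAP_SIZE reports to userspace.
 */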
static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct kvm_vcpu *vcpu = vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff == 0)
                page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
        else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
                page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
                page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
        else
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;
        return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
        .fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vcpu_vm_ops;
        return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
        struct kvm_vcpu *vcpu = filp->private_data;

        kvm_put_kvm(vcpu->kvm);
        return 0;
}

static struct file_operations kvm_vcpu_fops = {
        .release        = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
        .compat_ioctl   = kvm_vcpu_ioctl,
        .mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
        return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
        int r;
        struct kvm_vcpu *vcpu, *v;

        vcpu = kvm_arch_vcpu_create(kvm, id);
        if (IS_ERR(vcpu))
                return PTR_ERR(vcpu);

        preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

        r = kvm_arch_vcpu_setup(vcpu);
        if (r)
                return r;

        mutex_lock(&kvm->lock);
        if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
                r = -EINVAL;
                goto vcpu_destroy;
        }

        kvm_for_each_vcpu(r, v, kvm)
                if (v->vcpu_id == id) {
                        r = -EEXIST;
                        goto vcpu_destroy;
                }

        BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

        /* Now it's all set up, let userspace reach it */
        kvm_get_kvm(kvm);
        r = create_vcpu_fd(vcpu);
        if (r < 0) {
                kvm_put_kvm(kvm);
                goto vcpu_destroy;
        }

        kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
        smp_wmb();
        atomic_inc(&kvm->online_vcpus);

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
        if (kvm->bsp_vcpu_id == id)
                kvm->bsp_vcpu = vcpu;
#endif
        mutex_unlock(&kvm->lock);
        return r;

vcpu_destroy:
        mutex_unlock(&kvm->lock);
        kvm_arch_vcpu_destroy(vcpu);
        return r;
}

1241 if (sigset) {
1242 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
1243 vcpu->sigset_active = 1;
1244 vcpu->sigset = *sigset;
1245 } else
1246 vcpu->sigset_active = 0;
1247 return 0;
1248}
1249
bccf2150
AK
1250static long kvm_vcpu_ioctl(struct file *filp,
1251 unsigned int ioctl, unsigned long arg)
6aa8b732 1252{
bccf2150 1253 struct kvm_vcpu *vcpu = filp->private_data;
2f366987 1254 void __user *argp = (void __user *)arg;
313a3dc7 1255 int r;
fa3795a7
DH
1256 struct kvm_fpu *fpu = NULL;
1257 struct kvm_sregs *kvm_sregs = NULL;
6aa8b732 1258
6d4e4c4f
AK
1259 if (vcpu->kvm->mm != current->mm)
1260 return -EIO;
6aa8b732 1261 switch (ioctl) {
9a2bb7f4 1262 case KVM_RUN:
f0fe5108
AK
1263 r = -EINVAL;
1264 if (arg)
1265 goto out;
b6c7a5dc 1266 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
6aa8b732 1267 break;
6aa8b732 1268 case KVM_GET_REGS: {
3e4bb3ac 1269 struct kvm_regs *kvm_regs;
6aa8b732 1270
3e4bb3ac
XZ
1271 r = -ENOMEM;
1272 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
1273 if (!kvm_regs)
6aa8b732 1274 goto out;
3e4bb3ac
XZ
1275 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
1276 if (r)
1277 goto out_free1;
6aa8b732 1278 r = -EFAULT;
3e4bb3ac
XZ
1279 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
1280 goto out_free1;
6aa8b732 1281 r = 0;
3e4bb3ac
XZ
1282out_free1:
1283 kfree(kvm_regs);
6aa8b732
AK
1284 break;
1285 }
1286 case KVM_SET_REGS: {
3e4bb3ac 1287 struct kvm_regs *kvm_regs;
6aa8b732 1288
3e4bb3ac
XZ
1289 r = -ENOMEM;
1290 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
1291 if (!kvm_regs)
6aa8b732 1292 goto out;
3e4bb3ac
XZ
1293 r = -EFAULT;
1294 if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
1295 goto out_free2;
1296 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
6aa8b732 1297 if (r)
3e4bb3ac 1298 goto out_free2;
6aa8b732 1299 r = 0;
3e4bb3ac
XZ
1300out_free2:
1301 kfree(kvm_regs);
6aa8b732
AK
1302 break;
1303 }
1304 case KVM_GET_SREGS: {
fa3795a7
DH
1305 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1306 r = -ENOMEM;
1307 if (!kvm_sregs)
1308 goto out;
1309 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
6aa8b732
AK
1310 if (r)
1311 goto out;
1312 r = -EFAULT;
fa3795a7 1313 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
6aa8b732
AK
1314 goto out;
1315 r = 0;
1316 break;
1317 }
1318 case KVM_SET_SREGS: {
fa3795a7
DH
1319 kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1320 r = -ENOMEM;
1321 if (!kvm_sregs)
1322 goto out;
6aa8b732 1323 r = -EFAULT;
fa3795a7 1324 if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
6aa8b732 1325 goto out;
fa3795a7 1326 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
6aa8b732
AK
1327 if (r)
1328 goto out;
1329 r = 0;
1330 break;
1331 }
62d9f0db
MT
1332 case KVM_GET_MP_STATE: {
1333 struct kvm_mp_state mp_state;
1334
1335 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
1336 if (r)
1337 goto out;
1338 r = -EFAULT;
1339 if (copy_to_user(argp, &mp_state, sizeof mp_state))
1340 goto out;
1341 r = 0;
1342 break;
1343 }
1344 case KVM_SET_MP_STATE: {
1345 struct kvm_mp_state mp_state;
1346
1347 r = -EFAULT;
1348 if (copy_from_user(&mp_state, argp, sizeof mp_state))
1349 goto out;
1350 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
1351 if (r)
1352 goto out;
1353 r = 0;
1354 break;
1355 }
6aa8b732
AK
1356 case KVM_TRANSLATE: {
1357 struct kvm_translation tr;
1358
1359 r = -EFAULT;
2f366987 1360 if (copy_from_user(&tr, argp, sizeof tr))
6aa8b732 1361 goto out;
8b006791 1362 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
6aa8b732
AK
1363 if (r)
1364 goto out;
1365 r = -EFAULT;
2f366987 1366 if (copy_to_user(argp, &tr, sizeof tr))
6aa8b732
AK
1367 goto out;
1368 r = 0;
1369 break;
1370 }
d0bfb940
JK
1371 case KVM_SET_GUEST_DEBUG: {
1372 struct kvm_guest_debug dbg;
6aa8b732
AK
1373
1374 r = -EFAULT;
2f366987 1375 if (copy_from_user(&dbg, argp, sizeof dbg))
6aa8b732 1376 goto out;
d0bfb940 1377 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
6aa8b732
AK
1378 if (r)
1379 goto out;
1380 r = 0;
1381 break;
1382 }
1961d276
AK
1383 case KVM_SET_SIGNAL_MASK: {
1384 struct kvm_signal_mask __user *sigmask_arg = argp;
1385 struct kvm_signal_mask kvm_sigmask;
1386 sigset_t sigset, *p;
1387
1388 p = NULL;
1389 if (argp) {
1390 r = -EFAULT;
1391 if (copy_from_user(&kvm_sigmask, argp,
1392 sizeof kvm_sigmask))
1393 goto out;
1394 r = -EINVAL;
1395 if (kvm_sigmask.len != sizeof sigset)
1396 goto out;
1397 r = -EFAULT;
1398 if (copy_from_user(&sigset, sigmask_arg->sigset,
1399 sizeof sigset))
1400 goto out;
1401 p = &sigset;
1402 }
1403 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
1404 break;
1405 }
b8836737 1406 case KVM_GET_FPU: {
fa3795a7
DH
1407 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
1408 r = -ENOMEM;
1409 if (!fpu)
1410 goto out;
1411 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
b8836737
AK
1412 if (r)
1413 goto out;
1414 r = -EFAULT;
fa3795a7 1415 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
b8836737
AK
1416 goto out;
1417 r = 0;
1418 break;
1419 }
1420 case KVM_SET_FPU: {
fa3795a7
DH
1421 fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
1422 r = -ENOMEM;
1423 if (!fpu)
1424 goto out;
b8836737 1425 r = -EFAULT;
fa3795a7 1426 if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
b8836737 1427 goto out;
fa3795a7 1428 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
b8836737
AK
1429 if (r)
1430 goto out;
1431 r = 0;
1432 break;
1433 }
bccf2150 1434 default:
313a3dc7 1435 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
bccf2150
AK
1436 }
1437out:
fa3795a7
DH
1438 kfree(fpu);
1439 kfree(kvm_sregs);
bccf2150
AK
1440 return r;
1441}
1442
static long kvm_vm_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        if (kvm->mm != current->mm)
                return -EIO;
        switch (ioctl) {
        case KVM_CREATE_VCPU:
                r = kvm_vm_ioctl_create_vcpu(kvm, arg);
                if (r < 0)
                        goto out;
                break;
        case KVM_SET_USER_MEMORY_REGION: {
                struct kvm_userspace_memory_region kvm_userspace_mem;

                r = -EFAULT;
                if (copy_from_user(&kvm_userspace_mem, argp,
                                   sizeof kvm_userspace_mem))
                        goto out;

                r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
                if (r)
                        goto out;
                break;
        }
        case KVM_GET_DIRTY_LOG: {
                struct kvm_dirty_log log;

                r = -EFAULT;
                if (copy_from_user(&log, argp, sizeof log))
                        goto out;
                r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
                if (r)
                        goto out;
                break;
        }
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        case KVM_REGISTER_COALESCED_MMIO: {
                struct kvm_coalesced_mmio_zone zone;
                r = -EFAULT;
                if (copy_from_user(&zone, argp, sizeof zone))
                        goto out;
                r = -ENXIO;
                r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_UNREGISTER_COALESCED_MMIO: {
                struct kvm_coalesced_mmio_zone zone;
                r = -EFAULT;
                if (copy_from_user(&zone, argp, sizeof zone))
                        goto out;
                r = -ENXIO;
                r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
                if (r)
                        goto out;
                r = 0;
                break;
        }
#endif
        case KVM_IRQFD: {
                struct kvm_irqfd data;

                r = -EFAULT;
                if (copy_from_user(&data, argp, sizeof data))
                        goto out;
                r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
                break;
        }
        case KVM_IOEVENTFD: {
                struct kvm_ioeventfd data;

                r = -EFAULT;
                if (copy_from_user(&data, argp, sizeof data))
                        goto out;
                r = kvm_ioeventfd(kvm, &data);
                break;
        }
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
        case KVM_SET_BOOT_CPU_ID:
                r = 0;
                mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus) != 0)
                        r = -EBUSY;
                else
                        kvm->bsp_vcpu_id = arg;
                mutex_unlock(&kvm->lock);
                break;
#endif
        default:
                r = kvm_arch_vm_ioctl(filp, ioctl, arg);
                if (r == -ENOTTY)
                        r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
        }
out:
        return r;
}

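/*
 * KVM_GET_DIRTY_LOG is the one vm ioctl whose layout differs for 32-bit
 * userspace on a 64-bit kernel: struct kvm_dirty_log embeds a user
 * pointer.  The compat path below converts the 32-bit dirty_bitmap
 * pointer with compat_ptr() and forwards everything else to
 * kvm_vm_ioctl() unchanged.
 */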
#ifdef CONFIG_COMPAT
struct compat_kvm_dirty_log {
        __u32 slot;
        __u32 padding1;
        union {
                compat_uptr_t dirty_bitmap; /* one bit per page */
                __u64 padding2;
        };
};

static long kvm_vm_compat_ioctl(struct file *filp,
                                unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        int r;

        if (kvm->mm != current->mm)
                return -EIO;
        switch (ioctl) {
        case KVM_GET_DIRTY_LOG: {
                struct compat_kvm_dirty_log compat_log;
                struct kvm_dirty_log log;

                r = -EFAULT;
                if (copy_from_user(&compat_log, (void __user *)arg,
                                   sizeof(compat_log)))
                        goto out;
                log.slot         = compat_log.slot;
                log.padding1     = compat_log.padding1;
                log.padding2     = compat_log.padding2;
                log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

                r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
                if (r)
                        goto out;
                break;
        }
        default:
                r = kvm_vm_ioctl(filp, ioctl, arg);
        }

out:
        return r;
}
#endif

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page[1];
        unsigned long addr;
        int npages;
        gfn_t gfn = vmf->pgoff;
        struct kvm *kvm = vma->vm_file->private_data;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return VM_FAULT_SIGBUS;

        npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
                                NULL);
        if (unlikely(npages != 1))
                return VM_FAULT_SIGBUS;

        vmf->page = page[0];
        return 0;
}

static const struct vm_operations_struct kvm_vm_vm_ops = {
        .fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vm_vm_ops;
        return 0;
}

static struct file_operations kvm_vm_fops = {
        .release        = kvm_vm_release,
        .unlocked_ioctl = kvm_vm_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = kvm_vm_compat_ioctl,
#endif
        .mmap           = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
        int fd;
        struct kvm *kvm;

        kvm = kvm_create_vm();
        if (IS_ERR(kvm))
                return PTR_ERR(kvm);
        fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
        if (fd < 0)
                kvm_put_kvm(kvm);

        return fd;
}

static long kvm_dev_ioctl_check_extension_generic(long arg)
{
        switch (arg) {
        case KVM_CAP_USER_MEMORY:
        case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
        case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
        case KVM_CAP_SET_BOOT_CPU_ID:
#endif
                return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        case KVM_CAP_IRQ_ROUTING:
                return KVM_MAX_IRQ_ROUTES;
#endif
        default:
                break;
        }
        return kvm_dev_ioctl_check_extension(arg);
}

static long kvm_dev_ioctl(struct file *filp,
                          unsigned int ioctl, unsigned long arg)
{
        long r = -EINVAL;

        switch (ioctl) {
        case KVM_GET_API_VERSION:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = KVM_API_VERSION;
                break;
        case KVM_CREATE_VM:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = kvm_dev_ioctl_create_vm();
                break;
        case KVM_CHECK_EXTENSION:
                r = kvm_dev_ioctl_check_extension_generic(arg);
                break;
        case KVM_GET_VCPU_MMAP_SIZE:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
                r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
                r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
                break;
        case KVM_TRACE_ENABLE:
        case KVM_TRACE_PAUSE:
        case KVM_TRACE_DISABLE:
                r = -EOPNOTSUPP;
                break;
        default:
                return kvm_arch_dev_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}

static struct file_operations kvm_chardev_ops = {
        .unlocked_ioctl = kvm_dev_ioctl,
        .compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
        KVM_MINOR,
        "kvm",
        &kvm_chardev_ops,
};

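/*
 * Hardware virtualization extensions are enabled lazily: kvm_usage_count
 * tracks live VMs, the first kvm_create_vm() enables VMX/SVM on every
 * online cpu, and the last kvm_destroy_vm() disables it again.
 * cpus_hardware_enabled records the per-cpu state so the hotplug,
 * suspend and reboot paths enable or disable exactly once per cpu.
 */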
static void hardware_enable(void *junk)
{
        int cpu = raw_smp_processor_id();
        int r;

        if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
                return;

        cpumask_set_cpu(cpu, cpus_hardware_enabled);

        r = kvm_arch_hardware_enable(NULL);

        if (r) {
                cpumask_clear_cpu(cpu, cpus_hardware_enabled);
                atomic_inc(&hardware_enable_failed);
                printk(KERN_INFO "kvm: enabling virtualization on "
                                 "CPU%d failed\n", cpu);
        }
}

static void hardware_disable(void *junk)
{
        int cpu = raw_smp_processor_id();

        if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
                return;
        cpumask_clear_cpu(cpu, cpus_hardware_enabled);
        kvm_arch_hardware_disable(NULL);
}

static void hardware_disable_all_nolock(void)
{
        BUG_ON(!kvm_usage_count);

        kvm_usage_count--;
        if (!kvm_usage_count)
                on_each_cpu(hardware_disable, NULL, 1);
}

static void hardware_disable_all(void)
{
        spin_lock(&kvm_lock);
        hardware_disable_all_nolock();
        spin_unlock(&kvm_lock);
}

static int hardware_enable_all(void)
{
        int r = 0;

        spin_lock(&kvm_lock);

        kvm_usage_count++;
        if (kvm_usage_count == 1) {
                atomic_set(&hardware_enable_failed, 0);
                on_each_cpu(hardware_enable, NULL, 1);

                if (atomic_read(&hardware_enable_failed)) {
                        hardware_disable_all_nolock();
                        r = -EBUSY;
                }
        }

        spin_unlock(&kvm_lock);

        return r;
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
                           void *v)
{
        int cpu = (long)v;

        if (!kvm_usage_count)
                return NOTIFY_OK;

        val &= ~CPU_TASKS_FROZEN;
        switch (val) {
        case CPU_DYING:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
                hardware_disable(NULL);
                break;
        case CPU_UP_CANCELED:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
                smp_call_function_single(cpu, hardware_disable, NULL, 1);
                break;
        case CPU_ONLINE:
                printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
                       cpu);
                smp_call_function_single(cpu, hardware_enable, NULL, 1);
                break;
        }
        return NOTIFY_OK;
}

asmlinkage void kvm_handle_fault_on_reboot(void)
{
        if (kvm_rebooting)
                /* spin while reset goes on */
                while (true)
                        ;
        /* Fault while not rebooting.  We want the trace. */
        BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
                      void *v)
{
        /*
         * Some (well, at least mine) BIOSes hang on reboot if
         * in vmx root mode.
         *
         * And Intel TXT requires VMX off for all cpus when the system
         * shuts down.
         */
        printk(KERN_INFO "kvm: exiting hardware virtualization\n");
        kvm_rebooting = true;
        on_each_cpu(hardware_disable, NULL, 1);
        return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
        .notifier_call = kvm_reboot,
        .priority = 0,
};

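/*
 * An io bus is a small fixed-size array of in-kernel devices (PIO or
 * MMIO).  Writes and reads are offered to each device in turn until one
 * accepts the address; registration and removal take slots_lock for
 * write, while dispatch runs with slots_lock already held by the caller.
 */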
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
        memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
        int i;

        for (i = 0; i < bus->dev_count; i++) {
                struct kvm_io_device *pos = bus->devs[i];

                kvm_iodevice_destructor(pos);
        }
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
                     int len, const void *val)
{
        int i;
        for (i = 0; i < bus->dev_count; i++)
                if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
                        return 0;
        return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, void *val)
{
        int i;
        for (i = 0; i < bus->dev_count; i++)
                if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
                        return 0;
        return -EOPNOTSUPP;
}

int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
                            struct kvm_io_device *dev)
{
        int ret;

        down_write(&kvm->slots_lock);
        ret = __kvm_io_bus_register_dev(bus, dev);
        up_write(&kvm->slots_lock);

        return ret;
}

/* An unlocked version. Caller must have write lock on slots_lock. */
int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
                              struct kvm_io_device *dev)
{
        if (bus->dev_count > NR_IOBUS_DEVS-1)
                return -ENOSPC;

        bus->devs[bus->dev_count++] = dev;

        return 0;
}

void kvm_io_bus_unregister_dev(struct kvm *kvm,
                               struct kvm_io_bus *bus,
                               struct kvm_io_device *dev)
{
        down_write(&kvm->slots_lock);
        __kvm_io_bus_unregister_dev(bus, dev);
        up_write(&kvm->slots_lock);
}

/* An unlocked version. Caller must have write lock on slots_lock. */
void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
                                 struct kvm_io_device *dev)
{
        int i;

        for (i = 0; i < bus->dev_count; i++)
                if (bus->devs[i] == dev) {
                        bus->devs[i] = bus->devs[--bus->dev_count];
                        break;
                }
}

static struct notifier_block kvm_cpu_notifier = {
        .notifier_call = kvm_cpu_hotplug,
        .priority = 20, /* must be > scheduler priority */
};

static int vm_stat_get(void *_offset, u64 *val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;

        *val = 0;
        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                *val += *(u32 *)((void *)kvm + offset);
        spin_unlock(&kvm_lock);
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;
        struct kvm_vcpu *vcpu;
        int i;

        *val = 0;
        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                kvm_for_each_vcpu(i, vcpu, kvm)
                        *val += *(u32 *)((void *)vcpu + offset);

        spin_unlock(&kvm_lock);
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
        [KVM_STAT_VCPU] = &vcpu_stat_fops,
        [KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
        for (p = debugfs_entries; p->name; ++p)
                p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
                                                (void *)(long)p->offset,
                                                stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        for (p = debugfs_entries; p->name; ++p)
                debugfs_remove(p->dentry);
        debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
        if (kvm_usage_count)
                hardware_disable(NULL);
        return 0;
}

static int kvm_resume(struct sys_device *dev)
{
        if (kvm_usage_count)
                hardware_enable(NULL);
        return 0;
}

static struct sysdev_class kvm_sysdev_class = {
        .name = "kvm",
        .suspend = kvm_suspend,
        .resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
        .id = 0,
        .cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
        return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
                          struct task_struct *next)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        kvm_arch_vcpu_put(vcpu);
}

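/*
 * kvm_init()/kvm_exit() are called by the arch-specific module (e.g.
 * kvm-intel or kvm-amd on x86).  A minimal sketch of a caller, assuming
 * a hypothetical arch vcpu type `struct vcpu_foo` and opaque ops
 * `foo_ops`:
 *
 *	static int __init foo_module_init(void)
 *	{
 *		return kvm_init(&foo_ops, sizeof(struct vcpu_foo),
 *				THIS_MODULE);
 *	}
 *
 * vcpu_size feeds the kvm_vcpu kmem cache below, so each arch gets
 * correctly sized and aligned vcpu allocations.
 */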
int kvm_init(void *opaque, unsigned int vcpu_size,
             struct module *module)
{
        int r;
        int cpu;

        r = kvm_arch_init(opaque);
        if (r)
                goto out_fail;

        bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

        if (bad_page == NULL) {
                r = -ENOMEM;
                goto out;
        }

        bad_pfn = page_to_pfn(bad_page);

        if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
                r = -ENOMEM;
                goto out_free_0;
        }

        r = kvm_arch_hardware_setup();
        if (r < 0)
                goto out_free_0a;

        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu,
                                kvm_arch_check_processor_compat,
                                &r, 1);
                if (r < 0)
                        goto out_free_1;
        }

        r = register_cpu_notifier(&kvm_cpu_notifier);
        if (r)
                goto out_free_2;
        register_reboot_notifier(&kvm_reboot_notifier);

        r = sysdev_class_register(&kvm_sysdev_class);
        if (r)
                goto out_free_3;

        r = sysdev_register(&kvm_sysdev);
        if (r)
                goto out_free_4;

        /* A kmem cache lets us meet the alignment requirements of fx_save. */
        kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
                                           __alignof__(struct kvm_vcpu),
                                           0, NULL);
        if (!kvm_vcpu_cache) {
                r = -ENOMEM;
                goto out_free_5;
        }

        kvm_chardev_ops.owner = module;
        kvm_vm_fops.owner = module;
        kvm_vcpu_fops.owner = module;

        r = misc_register(&kvm_dev);
        if (r) {
                printk(KERN_ERR "kvm: misc device register failed\n");
                goto out_free;
        }

        kvm_preempt_ops.sched_in = kvm_sched_in;
        kvm_preempt_ops.sched_out = kvm_sched_out;

        kvm_init_debug();

        return 0;

out_free:
        kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
        sysdev_unregister(&kvm_sysdev);
out_free_4:
        sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
        kvm_arch_hardware_unsetup();
out_free_0a:
        free_cpumask_var(cpus_hardware_enabled);
out_free_0:
        __free_page(bad_page);
out:
        kvm_arch_exit();
out_fail:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
        tracepoint_synchronize_unregister();
        kvm_exit_debug();
        misc_deregister(&kvm_dev);
        kmem_cache_destroy(kvm_vcpu_cache);
        sysdev_unregister(&kvm_sysdev);
        sysdev_class_unregister(&kvm_sysdev_class);
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
        on_each_cpu(hardware_disable, NULL, 1);
        kvm_arch_hardware_unsetup();
        kvm_arch_exit();
        free_cpumask_var(cpus_hardware_enabled);
        __free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);