/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm-generic/bitops/le.h>

#include "coalesced_mmio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * Ordering of locks:
 *
 *        kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                           unsigned long arg);
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static bool kvm_rebooting;

static bool largepages_enabled = true;

inline int kvm_is_mmio_pfn(pfn_t pfn)
{
        if (pfn_valid(pfn)) {
                struct page *page = compound_head(pfn_to_page(pfn));
                return PageReserved(page);
        }

        return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
        int cpu;

        mutex_lock(&vcpu->mutex);
        cpu = get_cpu();
        preempt_notifier_register(&vcpu->preempt_notifier);
        kvm_arch_vcpu_load(vcpu, cpu);
        put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        kvm_arch_vcpu_put(vcpu);
        preempt_notifier_unregister(&vcpu->preempt_notifier);
        preempt_enable();
        mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
        int i, cpu, me;
        cpumask_var_t cpus;
        bool called = true;
        struct kvm_vcpu *vcpu;

        zalloc_cpumask_var(&cpus, GFP_ATOMIC);

        spin_lock(&kvm->requests_lock);
        me = smp_processor_id();
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (test_and_set_bit(req, &vcpu->requests))
                        continue;
                cpu = vcpu->cpu;
                if (cpus != NULL && cpu != -1 && cpu != me)
                        cpumask_set_cpu(cpu, cpus);
        }
        if (unlikely(cpus == NULL))
                smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
        else if (!cpumask_empty(cpus))
                smp_call_function_many(cpus, ack_flush, NULL, 1);
        else
                called = false;
        spin_unlock(&kvm->requests_lock);
        free_cpumask_var(cpus);
        return called;
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
        if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
                ++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
        make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}
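
/*
 * Illustrative sketch (not part of the original file): requests raised by
 * make_all_cpus_request() are consumed on the vCPU side by testing and
 * clearing the bit before entering the guest.  This follows the pattern
 * used by arch code (e.g. x86's vcpu_enter_guest()); the handlers named
 * in the body are arch-specific and shown only for illustration.
 */
#if 0
        if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
                kvm_x86_ops->tlb_flush(vcpu);   /* arch-specific hook */
        if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                kvm_mmu_unload(vcpu);
#endif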

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
        struct page *page;
        int r;

        mutex_init(&vcpu->mutex);
        vcpu->cpu = -1;
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
        init_waitqueue_head(&vcpu->wq);

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto fail;
        }
        vcpu->run = page_address(page);

        r = kvm_arch_vcpu_init(vcpu);
        if (r < 0)
                goto fail_free_run;
        return 0;

fail_free_run:
        free_page((unsigned long)vcpu->run);
fail:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_uninit(vcpu);
        free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
        return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
                                             struct mm_struct *mm,
                                             unsigned long address)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int need_tlb_flush;

        /*
         * When ->invalidate_page runs, the linux pte has been zapped
         * already but the page is still allocated until
         * ->invalidate_page returns.  So if we increase the sequence
         * here the kvm page fault will notice if the spte can't be
         * established because the page is going to be freed.  If
         * instead the kvm page fault establishes the spte before
         * ->invalidate_page runs, kvm_unmap_hva will release it
         * before returning.
         *
         * The sequence increase only needs to be seen at spin_unlock
         * time, and not at spin_lock time.
         *
         * Increasing the sequence after the spin_unlock would be
         * unsafe because the kvm page fault could then establish the
         * pte after kvm_unmap_hva returned, without noticing the page
         * is going to be freed.
         */
        spin_lock(&kvm->mmu_lock);
        kvm->mmu_notifier_seq++;
        need_tlb_flush = kvm_unmap_hva(kvm, address);
        spin_unlock(&kvm->mmu_lock);

        /* we have to flush the tlb before the pages can be freed */
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
                                        struct mm_struct *mm,
                                        unsigned long address,
                                        pte_t pte)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);

        spin_lock(&kvm->mmu_lock);
        kvm->mmu_notifier_seq++;
        kvm_set_spte_hva(kvm, address, pte);
        spin_unlock(&kvm->mmu_lock);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                                    struct mm_struct *mm,
                                                    unsigned long start,
                                                    unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int need_tlb_flush = 0;

        spin_lock(&kvm->mmu_lock);
        /*
         * The count increase must become visible at unlock time as no
         * spte can be established without taking the mmu_lock and
         * count is also read inside the mmu_lock critical section.
         */
        kvm->mmu_notifier_count++;
        for (; start < end; start += PAGE_SIZE)
                need_tlb_flush |= kvm_unmap_hva(kvm, start);
        spin_unlock(&kvm->mmu_lock);

        /* we have to flush the tlb before the pages can be freed */
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
                                                  struct mm_struct *mm,
                                                  unsigned long start,
                                                  unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);

        spin_lock(&kvm->mmu_lock);
        /*
         * This sequence increase will notify the kvm page fault that
         * the page that is going to be mapped in the spte could have
         * been freed.
         */
        kvm->mmu_notifier_seq++;
        /*
         * The above sequence increase must be visible before the
         * below count decrease but both values are read by the kvm
         * page fault under mmu_lock spinlock so we don't need to add
         * a smp_wmb() here in between the two.
         */
        kvm->mmu_notifier_count--;
        spin_unlock(&kvm->mmu_lock);

        BUG_ON(kvm->mmu_notifier_count < 0);
}
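
/*
 * Illustrative sketch (not part of the original file): how an arch page
 * fault path is expected to consume mmu_notifier_seq/mmu_notifier_count.
 * It snapshots the sequence outside mmu_lock, then re-checks both values
 * under mmu_lock before installing an spte, retrying if an invalidate ran
 * in between; this mirrors the mmu_notifier_retry() logic in arch code.
 */
#if 0
        seq = kvm->mmu_notifier_seq;
        smp_rmb();
        pfn = gfn_to_pfn(kvm, gfn);             /* may sleep, may race */

        spin_lock(&kvm->mmu_lock);
        if (kvm->mmu_notifier_count || kvm->mmu_notifier_seq != seq)
                goto retry;                     /* invalidate ran: start over */
        /* ... safe to establish the spte here ... */
        spin_unlock(&kvm->mmu_lock);
#endif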

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
                                              struct mm_struct *mm,
                                              unsigned long address)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int young;

        spin_lock(&kvm->mmu_lock);
        young = kvm_age_hva(kvm, address);
        spin_unlock(&kvm->mmu_lock);

        if (young)
                kvm_flush_remote_tlbs(kvm);

        return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
                                     struct mm_struct *mm)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        kvm_arch_flush_shadow(kvm);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
        .invalidate_page        = kvm_mmu_notifier_invalidate_page,
        .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
        .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
        .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
        .change_pte             = kvm_mmu_notifier_change_pte,
        .release                = kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
        kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
        return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
        return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm *kvm_create_vm(void)
{
        int r = 0;
        struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        struct page *page;
#endif

        if (IS_ERR(kvm))
                goto out;

        r = hardware_enable_all();
        if (r)
                goto out_err_nodisable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
        INIT_HLIST_HEAD(&kvm->mask_notifier_list);
        INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

        r = -ENOMEM;
        kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
        if (!kvm->memslots)
                goto out_err;

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page)
                goto out_err;

        kvm->coalesced_mmio_ring =
                        (struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

        r = kvm_init_mmu_notifier(kvm);
        if (r) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
                put_page(page);
#endif
                goto out_err;
        }

        kvm->mm = current->mm;
        atomic_inc(&kvm->mm->mm_count);
        spin_lock_init(&kvm->mmu_lock);
        spin_lock_init(&kvm->requests_lock);
        kvm_io_bus_init(&kvm->pio_bus);
        kvm_eventfd_init(kvm);
        mutex_init(&kvm->lock);
        mutex_init(&kvm->irq_lock);
        kvm_io_bus_init(&kvm->mmio_bus);
        init_rwsem(&kvm->slots_lock);
        atomic_set(&kvm->users_count, 1);
        spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
        spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        kvm_coalesced_mmio_init(kvm);
#endif
out:
        return kvm;

out_err:
        hardware_disable_all();
out_err_nodisable:
        kfree(kvm->memslots);
        kfree(kvm);
        return ERR_PTR(r);
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont)
{
        int i;

        if (!dont || free->rmap != dont->rmap)
                vfree(free->rmap);

        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                vfree(free->dirty_bitmap);

        for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
                if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
                        vfree(free->lpage_info[i]);
                        free->lpage_info[i] = NULL;
                }
        }

        free->npages = 0;
        free->dirty_bitmap = NULL;
        free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
        int i;
        struct kvm_memslots *slots = kvm->memslots;

        for (i = 0; i < slots->nmemslots; ++i)
                kvm_free_physmem_slot(&slots->memslots[i], NULL);

        kfree(kvm->memslots);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
        struct mm_struct *mm = kvm->mm;

        kvm_arch_sync_events(kvm);
        spin_lock(&kvm_lock);
        list_del(&kvm->vm_list);
        spin_unlock(&kvm_lock);
        kvm_free_irq_routing(kvm);
        kvm_io_bus_destroy(&kvm->pio_bus);
        kvm_io_bus_destroy(&kvm->mmio_bus);
        kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
        kvm_arch_flush_shadow(kvm);
#endif
        kvm_arch_destroy_vm(kvm);
        hardware_disable_all();
        mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
        atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
        if (atomic_dec_and_test(&kvm->users_count))
                kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);


static int kvm_vm_release(struct inode *inode, struct file *filp)
{
        struct kvm *kvm = filp->private_data;

        kvm_irqfd_release(kvm);

        kvm_put_kvm(kvm);
        return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->slots_lock for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc)
{
        int r;
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long i;
        struct kvm_memory_slot *memslot;
        struct kvm_memory_slot old, new;

        r = -EINVAL;
        /* General sanity checks */
        if (mem->memory_size & (PAGE_SIZE - 1))
                goto out;
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
        if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
                goto out;
        if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
                goto out;
        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                goto out;

        memslot = &kvm->memslots->memslots[mem->slot];
        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
        npages = mem->memory_size >> PAGE_SHIFT;

        if (!npages)
                mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

        new = old = *memslot;

        new.base_gfn = base_gfn;
        new.npages = npages;
        new.flags = mem->flags;

        /* Disallow changing a memory slot's size. */
        r = -EINVAL;
        if (npages && old.npages && npages != old.npages)
                goto out_free;

        /* Check for overlaps */
        r = -EEXIST;
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *s = &kvm->memslots->memslots[i];

                if (s == memslot || !s->npages)
                        continue;
                if (!((base_gfn + npages <= s->base_gfn) ||
                      (base_gfn >= s->base_gfn + s->npages)))
                        goto out_free;
        }

        /* Free page dirty bitmap if unneeded */
        if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
                new.dirty_bitmap = NULL;

        r = -ENOMEM;

        /* Allocate if a slot is being created */
#ifndef CONFIG_S390
        if (npages && !new.rmap) {
                new.rmap = vmalloc(npages * sizeof(struct page *));

                if (!new.rmap)
                        goto out_free;

                memset(new.rmap, 0, npages * sizeof(*new.rmap));

                new.user_alloc = user_alloc;
                /*
                 * hva_to_rmmap() serializes with the mmu_lock and to be
                 * safe it has to ignore memslots with !user_alloc &&
                 * !userspace_addr.
                 */
                if (user_alloc)
                        new.userspace_addr = mem->userspace_addr;
                else
                        new.userspace_addr = 0;
        }
        if (!npages)
                goto skip_lpage;

        for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
                unsigned long ugfn;
                unsigned long j;
                int lpages;
                int level = i + 2;

                /* Avoid unused variable warning if no large pages */
                (void)level;

                if (new.lpage_info[i])
                        continue;

                lpages = 1 + (base_gfn + npages - 1) /
                             KVM_PAGES_PER_HPAGE(level);
                lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);

                new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));

                if (!new.lpage_info[i])
                        goto out_free;

                memset(new.lpage_info[i], 0,
                       lpages * sizeof(*new.lpage_info[i]));

                if (base_gfn % KVM_PAGES_PER_HPAGE(level))
                        new.lpage_info[i][0].write_count = 1;
                if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE(level))
                        new.lpage_info[i][lpages - 1].write_count = 1;
                ugfn = new.userspace_addr >> PAGE_SHIFT;
                /*
                 * If the gfn and userspace address are not aligned wrt each
                 * other, or if explicitly asked to, disable large page
                 * support for this slot
                 */
                if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
                    !largepages_enabled)
                        for (j = 0; j < lpages; ++j)
                                new.lpage_info[i][j].write_count = 1;
        }

skip_lpage:

        /* Allocate page dirty bitmap if needed */
        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
                unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

                new.dirty_bitmap = vmalloc(dirty_bytes);
                if (!new.dirty_bitmap)
                        goto out_free;
                memset(new.dirty_bitmap, 0, dirty_bytes);
                if (old.npages)
                        kvm_arch_flush_shadow(kvm);
        }
#else  /* not defined CONFIG_S390 */
        new.user_alloc = user_alloc;
        if (user_alloc)
                new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */

        if (!npages)
                kvm_arch_flush_shadow(kvm);

        r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
        if (r)
                goto out_free;

        spin_lock(&kvm->mmu_lock);
        if (mem->slot >= kvm->memslots->nmemslots)
                kvm->memslots->nmemslots = mem->slot + 1;

        *memslot = new;
        spin_unlock(&kvm->mmu_lock);

        kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);

        kvm_free_physmem_slot(&old, npages ? &new : NULL);
        /* Slot deletion case: we have to update the current slot */
        spin_lock(&kvm->mmu_lock);
        if (!npages)
                *memslot = old;
        spin_unlock(&kvm->mmu_lock);
#ifdef CONFIG_DMAR
        /* map the pages in iommu page table */
        r = kvm_iommu_map_pages(kvm, base_gfn, npages);
        if (r)
                goto out;
#endif
        return 0;

out_free:
        kvm_free_physmem_slot(&new, &old);
out:
        return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
                          int user_alloc)
{
        int r;

        down_write(&kvm->slots_lock);
        r = __kvm_set_memory_region(kvm, mem, user_alloc);
        up_write(&kvm->slots_lock);
        return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        if (mem->slot >= KVM_MEMORY_SLOTS)
                return -EINVAL;
        return kvm_set_memory_region(kvm, mem, user_alloc);
}
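
/*
 * Illustrative userspace sketch (not part of the original file): the path
 * above is reached via the KVM_SET_USER_MEMORY_REGION ioctl on a VM fd.
 * Variable names, addresses, and sizes here are only for illustration.
 */
#if 0
        struct kvm_userspace_memory_region region = {
                .slot            = 0,
                .flags           = KVM_MEM_LOG_DIRTY_PAGES,
                .guest_phys_addr = 0x100000,
                .memory_size     = 0x200000,    /* must be page aligned */
                .userspace_addr  = (__u64)(unsigned long)mem, /* from mmap() */
        };

        if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
                perror("KVM_SET_USER_MEMORY_REGION");
#endif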

int kvm_get_dirty_log(struct kvm *kvm,
                      struct kvm_dirty_log *log, int *is_dirty)
{
        struct kvm_memory_slot *memslot;
        int r, i;
        int n;
        unsigned long any = 0;

        r = -EINVAL;
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;

        memslot = &kvm->memslots->memslots[log->slot];
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

        for (i = 0; !any && i < n/sizeof(long); ++i)
                any = memslot->dirty_bitmap[i];

        r = -EFAULT;
        if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
                goto out;

        if (any)
                *is_dirty = 1;

        r = 0;
out:
        return r;
}
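
/*
 * Illustrative userspace sketch (not part of the original file): the
 * helper above backs the KVM_GET_DIRTY_LOG ioctl.  The caller supplies a
 * bitmap with one bit per page in the slot, rounded up to a long
 * boundary; BITMAP_LONGS below is a hypothetical size derived from the
 * slot's page count.
 */
#if 0
        unsigned long bitmap[BITMAP_LONGS];     /* one bit per slot page */
        struct kvm_dirty_log log = {
                .slot         = 0,
                .dirty_bitmap = bitmap,
        };

        if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0)
                perror("KVM_GET_DIRTY_LOG");
#endif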

void kvm_disable_largepages(void)
{
        largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

int is_error_page(struct page *page)
{
        return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
        return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
        return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
        return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
        int i;
        struct kvm_memslots *slots = kvm->memslots;

        for (i = 0; i < slots->nmemslots; ++i) {
                struct kvm_memory_slot *memslot = &slots->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return memslot;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        gfn = unalias_gfn(kvm, gfn);
        return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
        int i;
        struct kvm_memslots *slots = kvm->memslots;

        gfn = unalias_gfn(kvm, gfn);
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *memslot = &slots->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;

        gfn = unalias_gfn(kvm, gfn);
        slot = gfn_to_memslot_unaliased(kvm, gfn);
        if (!slot)
                return bad_hva();
        return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
        struct page *page[1];
        unsigned long addr;
        int npages;
        pfn_t pfn;

        might_sleep();

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr)) {
                get_page(bad_page);
                return page_to_pfn(bad_page);
        }

        npages = get_user_pages_fast(addr, 1, 1, page);

        if (unlikely(npages != 1)) {
                struct vm_area_struct *vma;

                down_read(&current->mm->mmap_sem);
                vma = find_vma(current->mm, addr);

                if (vma == NULL || addr < vma->vm_start ||
                    !(vma->vm_flags & VM_PFNMAP)) {
                        up_read(&current->mm->mmap_sem);
                        get_page(bad_page);
                        return page_to_pfn(bad_page);
                }

                pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
                up_read(&current->mm->mmap_sem);
                BUG_ON(!kvm_is_mmio_pfn(pfn));
        } else
                pfn = page_to_pfn(page[0]);

        return pfn;
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
        pfn_t pfn;

        pfn = gfn_to_pfn(kvm, gfn);
        if (!kvm_is_mmio_pfn(pfn))
                return pfn_to_page(pfn);

        WARN_ON(kvm_is_mmio_pfn(pfn));

        get_page(bad_page);
        return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);
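
/*
 * Illustrative sketch (not part of the original file): gfn_to_page()
 * returns a page reference that the caller must drop with one of the
 * kvm_release_page_*() helpers below, using the _dirty variant when the
 * page was written.
 */
#if 0
        struct page *page = gfn_to_page(kvm, gfn);

        if (!is_error_page(page)) {
                /* ... access the page contents ... */
                kvm_release_page_dirty(page);   /* or _clean if read only */
        } else
                kvm_release_page_clean(page);   /* bad_page also holds a ref */
#endif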

void kvm_release_page_clean(struct page *page)
{
        kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
        if (!kvm_is_mmio_pfn(pfn))
                put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
        kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
        kvm_set_pfn_dirty(pfn);
        kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
        kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
        if (!kvm_is_mmio_pfn(pfn)) {
                struct page *page = pfn_to_page(pfn);
                if (!PageReserved(page))
                        SetPageDirty(page);
        }
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
        if (!kvm_is_mmio_pfn(pfn))
                mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
        if (!kvm_is_mmio_pfn(pfn))
                get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
        if (len > PAGE_SIZE - offset)
                return PAGE_SIZE - offset;
        else
                return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = copy_from_user(data, (void __user *)addr + offset, len);
        if (r)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);
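
/*
 * Illustrative sketch (not part of the original file): kvm_read_guest()
 * hides the per-page segmentation above, so a structure that straddles a
 * page boundary can be copied out in one call.  The struct name is
 * hypothetical.
 */
#if 0
        struct guest_descriptor desc;   /* hypothetical guest-side layout */

        if (kvm_read_guest(kvm, gpa, &desc, sizeof(desc)))
                return -EFAULT;         /* some gfn had no slot mapping */
#endif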

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len)
{
        int r;
        unsigned long addr;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int offset = offset_in_page(gpa);

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        pagefault_disable();
        r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
        pagefault_enable();
        if (r)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = copy_to_user((void __user *)addr + offset, data, len);
        if (r)
                return -EFAULT;
        mark_page_dirty(kvm, gfn);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
        return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        gfn = unalias_gfn(kvm, gfn);
        memslot = gfn_to_memslot_unaliased(kvm, gfn);
        if (memslot && memslot->dirty_bitmap) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;

                /* avoid RMW */
                if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
                        generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
        }
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

                if (kvm_arch_vcpu_runnable(vcpu)) {
                        set_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        break;
                }
                if (kvm_cpu_has_pending_timer(vcpu))
                        break;
                if (signal_pending(current))
                        break;

                schedule();
        }

        finish_wait(&vcpu->wq, &wait);
}
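
/*
 * Illustrative sketch (not part of the original file): whoever makes the
 * vcpu runnable again (e.g. arch code injecting an interrupt, or a timer
 * expiry) wakes the waitqueue that kvm_vcpu_block() sleeps on, following
 * the usual pattern in arch code:
 */
#if 0
        if (waitqueue_active(&vcpu->wq))
                wake_up_interruptible(&vcpu->wq);
#endif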

void kvm_resched(struct kvm_vcpu *vcpu)
{
        if (!need_resched())
                return;
        cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
{
        ktime_t expires;
        DEFINE_WAIT(wait);

        prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

        /* Sleep for 100 us, and hope lock-holder got scheduled */
        expires = ktime_add_ns(ktime_get(), 100000UL);
        schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);

        finish_wait(&vcpu->wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct kvm_vcpu *vcpu = vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff == 0)
                page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
        else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
                page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
                page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
        else
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;
        return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
        .fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vcpu_vm_ops;
        return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
        struct kvm_vcpu *vcpu = filp->private_data;

        kvm_put_kvm(vcpu->kvm);
        return 0;
}

static struct file_operations kvm_vcpu_fops = {
        .release        = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
        .compat_ioctl   = kvm_vcpu_ioctl,
        .mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
        return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
        int r;
        struct kvm_vcpu *vcpu, *v;

        vcpu = kvm_arch_vcpu_create(kvm, id);
        if (IS_ERR(vcpu))
                return PTR_ERR(vcpu);

        preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

        r = kvm_arch_vcpu_setup(vcpu);
        if (r)
                return r;

        mutex_lock(&kvm->lock);
        if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
                r = -EINVAL;
                goto vcpu_destroy;
        }

        kvm_for_each_vcpu(r, v, kvm)
                if (v->vcpu_id == id) {
                        r = -EEXIST;
                        goto vcpu_destroy;
                }

        BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

        /* Now it's all set up, let userspace reach it */
        kvm_get_kvm(kvm);
        r = create_vcpu_fd(vcpu);
        if (r < 0) {
                kvm_put_kvm(kvm);
                goto vcpu_destroy;
        }

        kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
        smp_wmb();
        atomic_inc(&kvm->online_vcpus);

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
        if (kvm->bsp_vcpu_id == id)
                kvm->bsp_vcpu = vcpu;
#endif
        mutex_unlock(&kvm->lock);
        return r;

vcpu_destroy:
        mutex_unlock(&kvm->lock);
        kvm_arch_vcpu_destroy(vcpu);
        return r;
}
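
/*
 * Illustrative userspace sketch (not part of the original file): the fd
 * returned by KVM_CREATE_VCPU is mmap()ed to reach struct kvm_run; the
 * mapping size comes from KVM_GET_VCPU_MMAP_SIZE on the /dev/kvm fd.
 */
#if 0
        int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
        long mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
        struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                                   MAP_SHARED, vcpu_fd, 0);

        while (ioctl(vcpu_fd, KVM_RUN, 0) == 0)
                /* inspect run->exit_reason and handle the exit */;
#endif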

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
        if (sigset) {
                sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
                vcpu->sigset_active = 1;
                vcpu->sigset = *sigset;
        } else
                vcpu->sigset_active = 0;
        return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;
        struct kvm_fpu *fpu = NULL;
        struct kvm_sregs *kvm_sregs = NULL;

        if (vcpu->kvm->mm != current->mm)
                return -EIO;
        switch (ioctl) {
        case KVM_RUN:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
                break;
        case KVM_GET_REGS: {
                struct kvm_regs *kvm_regs;

                r = -ENOMEM;
                kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
                if (!kvm_regs)
                        goto out;
                r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
                if (r)
                        goto out_free1;
                r = -EFAULT;
                if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
                        goto out_free1;
                r = 0;
out_free1:
                kfree(kvm_regs);
                break;
        }
        case KVM_SET_REGS: {
                struct kvm_regs *kvm_regs;

                r = -ENOMEM;
                kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
                if (!kvm_regs)
                        goto out;
                r = -EFAULT;
                if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
                        goto out_free2;
                r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
                if (r)
                        goto out_free2;
                r = 0;
out_free2:
                kfree(kvm_regs);
                break;
        }
        case KVM_GET_SREGS: {
                kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
                r = -ENOMEM;
                if (!kvm_sregs)
                        goto out;
                r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_SREGS: {
                kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
                r = -ENOMEM;
                if (!kvm_sregs)
                        goto out;
                r = -EFAULT;
                if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_GET_MP_STATE: {
                struct kvm_mp_state mp_state;

                r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &mp_state, sizeof mp_state))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_MP_STATE: {
                struct kvm_mp_state mp_state;

                r = -EFAULT;
                if (copy_from_user(&mp_state, argp, sizeof mp_state))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_TRANSLATE: {
                struct kvm_translation tr;

                r = -EFAULT;
                if (copy_from_user(&tr, argp, sizeof tr))
                        goto out;
                r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &tr, sizeof tr))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_GUEST_DEBUG: {
                struct kvm_guest_debug dbg;

                r = -EFAULT;
                if (copy_from_user(&dbg, argp, sizeof dbg))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_SIGNAL_MASK: {
                struct kvm_signal_mask __user *sigmask_arg = argp;
                struct kvm_signal_mask kvm_sigmask;
                sigset_t sigset, *p;

                p = NULL;
                if (argp) {
                        r = -EFAULT;
                        if (copy_from_user(&kvm_sigmask, argp,
                                           sizeof kvm_sigmask))
                                goto out;
                        r = -EINVAL;
                        if (kvm_sigmask.len != sizeof sigset)
                                goto out;
                        r = -EFAULT;
                        if (copy_from_user(&sigset, sigmask_arg->sigset,
                                           sizeof sigset))
                                goto out;
                        p = &sigset;
                }
                r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
                break;
        }
        case KVM_GET_FPU: {
                fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
                r = -ENOMEM;
                if (!fpu)
                        goto out;
                r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_FPU: {
                fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
                r = -ENOMEM;
                if (!fpu)
                        goto out;
                r = -EFAULT;
                if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        default:
                r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
        }
out:
        kfree(fpu);
        kfree(kvm_sregs);
        return r;
}

static long kvm_vm_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        if (kvm->mm != current->mm)
                return -EIO;
        switch (ioctl) {
        case KVM_CREATE_VCPU:
                r = kvm_vm_ioctl_create_vcpu(kvm, arg);
                if (r < 0)
                        goto out;
                break;
        case KVM_SET_USER_MEMORY_REGION: {
                struct kvm_userspace_memory_region kvm_userspace_mem;

                r = -EFAULT;
                if (copy_from_user(&kvm_userspace_mem, argp,
                                   sizeof kvm_userspace_mem))
                        goto out;

                r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
                if (r)
                        goto out;
                break;
        }
        case KVM_GET_DIRTY_LOG: {
                struct kvm_dirty_log log;

                r = -EFAULT;
                if (copy_from_user(&log, argp, sizeof log))
                        goto out;
                r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
                if (r)
                        goto out;
                break;
        }
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        case KVM_REGISTER_COALESCED_MMIO: {
                struct kvm_coalesced_mmio_zone zone;
                r = -EFAULT;
                if (copy_from_user(&zone, argp, sizeof zone))
                        goto out;
                r = -ENXIO;
                r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_UNREGISTER_COALESCED_MMIO: {
                struct kvm_coalesced_mmio_zone zone;
                r = -EFAULT;
                if (copy_from_user(&zone, argp, sizeof zone))
                        goto out;
                r = -ENXIO;
                r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
                if (r)
                        goto out;
                r = 0;
                break;
        }
#endif
        case KVM_IRQFD: {
                struct kvm_irqfd data;

                r = -EFAULT;
                if (copy_from_user(&data, argp, sizeof data))
                        goto out;
                r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
                break;
        }
        case KVM_IOEVENTFD: {
                struct kvm_ioeventfd data;

                r = -EFAULT;
                if (copy_from_user(&data, argp, sizeof data))
                        goto out;
                r = kvm_ioeventfd(kvm, &data);
                break;
        }
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
        case KVM_SET_BOOT_CPU_ID:
                r = 0;
                mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus) != 0)
                        r = -EBUSY;
                else
                        kvm->bsp_vcpu_id = arg;
                mutex_unlock(&kvm->lock);
                break;
#endif
        default:
                r = kvm_arch_vm_ioctl(filp, ioctl, arg);
                if (r == -ENOTTY)
                        r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
        }
out:
        return r;
}

#ifdef CONFIG_COMPAT
struct compat_kvm_dirty_log {
        __u32 slot;
        __u32 padding1;
        union {
                compat_uptr_t dirty_bitmap; /* one bit per page */
                __u64 padding2;
        };
};

static long kvm_vm_compat_ioctl(struct file *filp,
                                unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        int r;

        if (kvm->mm != current->mm)
                return -EIO;
        switch (ioctl) {
        case KVM_GET_DIRTY_LOG: {
                struct compat_kvm_dirty_log compat_log;
                struct kvm_dirty_log log;

                r = -EFAULT;
                if (copy_from_user(&compat_log, (void __user *)arg,
                                   sizeof(compat_log)))
                        goto out;
                log.slot         = compat_log.slot;
                log.padding1     = compat_log.padding1;
                log.padding2     = compat_log.padding2;
                log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

                r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
                if (r)
                        goto out;
                break;
        }
        default:
                r = kvm_vm_ioctl(filp, ioctl, arg);
        }

out:
        return r;
}
#endif

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page[1];
        unsigned long addr;
        int npages;
        gfn_t gfn = vmf->pgoff;
        struct kvm *kvm = vma->vm_file->private_data;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return VM_FAULT_SIGBUS;

        npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
                                NULL);
        if (unlikely(npages != 1))
                return VM_FAULT_SIGBUS;

        vmf->page = page[0];
        return 0;
}

static const struct vm_operations_struct kvm_vm_vm_ops = {
        .fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vm_vm_ops;
        return 0;
}

static struct file_operations kvm_vm_fops = {
        .release        = kvm_vm_release,
        .unlocked_ioctl = kvm_vm_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = kvm_vm_compat_ioctl,
#endif
        .mmap           = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
        int fd;
        struct kvm *kvm;

        kvm = kvm_create_vm();
        if (IS_ERR(kvm))
                return PTR_ERR(kvm);
        fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
        if (fd < 0)
                kvm_put_kvm(kvm);

        return fd;
}
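
/*
 * Illustrative userspace sketch (not part of the original file): a VM fd
 * is obtained through the misc device registered by this module, after
 * checking the API version as the KVM documentation recommends.
 */
#if 0
        int kvm_fd = open("/dev/kvm", O_RDWR);

        if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
                exit(1);                /* incompatible kernel */
        int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
#endif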

static long kvm_dev_ioctl_check_extension_generic(long arg)
{
        switch (arg) {
        case KVM_CAP_USER_MEMORY:
        case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
        case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
        case KVM_CAP_SET_BOOT_CPU_ID:
#endif
        case KVM_CAP_INTERNAL_ERROR_DATA:
                return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        case KVM_CAP_IRQ_ROUTING:
                return KVM_MAX_IRQ_ROUTES;
#endif
        default:
                break;
        }
        return kvm_dev_ioctl_check_extension(arg);
}

static long kvm_dev_ioctl(struct file *filp,
                          unsigned int ioctl, unsigned long arg)
{
        long r = -EINVAL;

        switch (ioctl) {
        case KVM_GET_API_VERSION:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = KVM_API_VERSION;
                break;
        case KVM_CREATE_VM:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = kvm_dev_ioctl_create_vm();
                break;
        case KVM_CHECK_EXTENSION:
                r = kvm_dev_ioctl_check_extension_generic(arg);
                break;
        case KVM_GET_VCPU_MMAP_SIZE:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
                r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
                r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
                break;
        case KVM_TRACE_ENABLE:
        case KVM_TRACE_PAUSE:
        case KVM_TRACE_DISABLE:
                r = -EOPNOTSUPP;
                break;
        default:
                return kvm_arch_dev_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}

static struct file_operations kvm_chardev_ops = {
        .unlocked_ioctl = kvm_dev_ioctl,
        .compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
        KVM_MINOR,
        "kvm",
        &kvm_chardev_ops,
};

static void hardware_enable(void *junk)
{
        int cpu = raw_smp_processor_id();
        int r;

        if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
                return;

        cpumask_set_cpu(cpu, cpus_hardware_enabled);

        r = kvm_arch_hardware_enable(NULL);

        if (r) {
                cpumask_clear_cpu(cpu, cpus_hardware_enabled);
                atomic_inc(&hardware_enable_failed);
                printk(KERN_INFO "kvm: enabling virtualization on "
                                 "CPU%d failed\n", cpu);
        }
}

static void hardware_disable(void *junk)
{
        int cpu = raw_smp_processor_id();

        if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
                return;
        cpumask_clear_cpu(cpu, cpus_hardware_enabled);
        kvm_arch_hardware_disable(NULL);
}

static void hardware_disable_all_nolock(void)
{
        BUG_ON(!kvm_usage_count);

        kvm_usage_count--;
        if (!kvm_usage_count)
                on_each_cpu(hardware_disable, NULL, 1);
}

static void hardware_disable_all(void)
{
        spin_lock(&kvm_lock);
        hardware_disable_all_nolock();
        spin_unlock(&kvm_lock);
}

static int hardware_enable_all(void)
{
        int r = 0;

        spin_lock(&kvm_lock);

        kvm_usage_count++;
        if (kvm_usage_count == 1) {
                atomic_set(&hardware_enable_failed, 0);
                on_each_cpu(hardware_enable, NULL, 1);

                if (atomic_read(&hardware_enable_failed)) {
                        hardware_disable_all_nolock();
                        r = -EBUSY;
                }
        }

        spin_unlock(&kvm_lock);

        return r;
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
                           void *v)
{
        int cpu = (long)v;

        if (!kvm_usage_count)
                return NOTIFY_OK;

        val &= ~CPU_TASKS_FROZEN;
        switch (val) {
        case CPU_DYING:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
                hardware_disable(NULL);
                break;
        case CPU_UP_CANCELED:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
                smp_call_function_single(cpu, hardware_disable, NULL, 1);
                break;
        case CPU_ONLINE:
                printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
                       cpu);
                smp_call_function_single(cpu, hardware_enable, NULL, 1);
                break;
        }
        return NOTIFY_OK;
}


asmlinkage void kvm_handle_fault_on_reboot(void)
{
        if (kvm_rebooting)
                /* spin while reset goes on */
                while (true)
                        ;
        /* Fault while not rebooting.  We want the trace. */
        BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
                      void *v)
{
        /*
         * Some (well, at least mine) BIOSes hang on reboot if
         * in vmx root mode.
         *
         * And Intel TXT requires VMX to be off on all CPUs when the
         * system shuts down.
         */
        printk(KERN_INFO "kvm: exiting hardware virtualization\n");
        kvm_rebooting = true;
        on_each_cpu(hardware_disable, NULL, 1);
        return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
        .notifier_call = kvm_reboot,
        .priority = 0,
};

void kvm_io_bus_init(struct kvm_io_bus *bus)
{
        memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
        int i;

        for (i = 0; i < bus->dev_count; i++) {
                struct kvm_io_device *pos = bus->devs[i];

                kvm_iodevice_destructor(pos);
        }
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
                     int len, const void *val)
{
        int i;
        for (i = 0; i < bus->dev_count; i++)
                if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
                        return 0;
        return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, void *val)
{
        int i;
        for (i = 0; i < bus->dev_count; i++)
                if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
                        return 0;
        return -EOPNOTSUPP;
}
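
/*
 * Illustrative sketch (not part of the original file): a device on the
 * bus claims an access by returning 0 from its kvm_io_device_ops
 * callback and returns -EOPNOTSUPP otherwise, which lets the loops above
 * keep probing the remaining devices.  MYDEV_BASE/MYDEV_LEN are
 * hypothetical.
 */
#if 0
static int mydev_write(struct kvm_io_device *this, gpa_t addr,
                       int len, const void *val)
{
        if (addr < MYDEV_BASE || addr + len > MYDEV_BASE + MYDEV_LEN)
                return -EOPNOTSUPP;     /* not ours; keep searching */
        /* ... handle the write ... */
        return 0;                       /* claimed: stops the bus scan */
}
#endif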

int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
                            struct kvm_io_device *dev)
{
        int ret;

        down_write(&kvm->slots_lock);
        ret = __kvm_io_bus_register_dev(bus, dev);
        up_write(&kvm->slots_lock);

        return ret;
}

/* An unlocked version. Caller must have write lock on slots_lock. */
int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
                              struct kvm_io_device *dev)
{
        if (bus->dev_count > NR_IOBUS_DEVS-1)
                return -ENOSPC;

        bus->devs[bus->dev_count++] = dev;

        return 0;
}

void kvm_io_bus_unregister_dev(struct kvm *kvm,
                               struct kvm_io_bus *bus,
                               struct kvm_io_device *dev)
{
        down_write(&kvm->slots_lock);
        __kvm_io_bus_unregister_dev(bus, dev);
        up_write(&kvm->slots_lock);
}

/* An unlocked version. Caller must have write lock on slots_lock. */
void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
                                 struct kvm_io_device *dev)
{
        int i;

        for (i = 0; i < bus->dev_count; i++)
                if (bus->devs[i] == dev) {
                        bus->devs[i] = bus->devs[--bus->dev_count];
                        break;
                }
}

static struct notifier_block kvm_cpu_notifier = {
        .notifier_call = kvm_cpu_hotplug,
        .priority = 20, /* must be > scheduler priority */
};

static int vm_stat_get(void *_offset, u64 *val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;

        *val = 0;
        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                *val += *(u32 *)((void *)kvm + offset);
        spin_unlock(&kvm_lock);
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;
        struct kvm_vcpu *vcpu;
        int i;

        *val = 0;
        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                kvm_for_each_vcpu(i, vcpu, kvm)
                        *val += *(u32 *)((void *)vcpu + offset);

        spin_unlock(&kvm_lock);
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
        [KVM_STAT_VCPU] = &vcpu_stat_fops,
        [KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
        for (p = debugfs_entries; p->name; ++p)
                p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
                                                (void *)(long)p->offset,
                                                stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        for (p = debugfs_entries; p->name; ++p)
                debugfs_remove(p->dentry);
        debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
        if (kvm_usage_count)
                hardware_disable(NULL);
        return 0;
}

static int kvm_resume(struct sys_device *dev)
{
        if (kvm_usage_count)
                hardware_enable(NULL);
        return 0;
}

static struct sysdev_class kvm_sysdev_class = {
        .name = "kvm",
        .suspend = kvm_suspend,
        .resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
        .id = 0,
        .cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
        return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
                          struct task_struct *next)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        kvm_arch_vcpu_put(vcpu);
}

int kvm_init(void *opaque, unsigned int vcpu_size,
             struct module *module)
{
        int r;
        int cpu;

        r = kvm_arch_init(opaque);
        if (r)
                goto out_fail;

        bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

        if (bad_page == NULL) {
                r = -ENOMEM;
                goto out;
        }

        bad_pfn = page_to_pfn(bad_page);

        if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
                r = -ENOMEM;
                goto out_free_0;
        }

        r = kvm_arch_hardware_setup();
        if (r < 0)
                goto out_free_0a;

        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu,
                                kvm_arch_check_processor_compat,
                                &r, 1);
                if (r < 0)
                        goto out_free_1;
        }

        r = register_cpu_notifier(&kvm_cpu_notifier);
        if (r)
                goto out_free_2;
        register_reboot_notifier(&kvm_reboot_notifier);

        r = sysdev_class_register(&kvm_sysdev_class);
        if (r)
                goto out_free_3;

        r = sysdev_register(&kvm_sysdev);
        if (r)
                goto out_free_4;

        /* A kmem cache lets us meet the alignment requirements of fx_save. */
        kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
                                           __alignof__(struct kvm_vcpu),
                                           0, NULL);
        if (!kvm_vcpu_cache) {
                r = -ENOMEM;
                goto out_free_5;
        }

        kvm_chardev_ops.owner = module;
        kvm_vm_fops.owner = module;
        kvm_vcpu_fops.owner = module;

        r = misc_register(&kvm_dev);
        if (r) {
                printk(KERN_ERR "kvm: misc device register failed\n");
                goto out_free;
        }

        kvm_preempt_ops.sched_in = kvm_sched_in;
        kvm_preempt_ops.sched_out = kvm_sched_out;

        kvm_init_debug();

        return 0;

out_free:
        kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
        sysdev_unregister(&kvm_sysdev);
out_free_4:
        sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
        kvm_arch_hardware_unsetup();
out_free_0a:
        free_cpumask_var(cpus_hardware_enabled);
out_free_0:
        __free_page(bad_page);
out:
        kvm_arch_exit();
out_fail:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
        tracepoint_synchronize_unregister();
        kvm_exit_debug();
        misc_deregister(&kvm_dev);
        kmem_cache_destroy(kvm_vcpu_cache);
        sysdev_unregister(&kvm_sysdev);
        sysdev_class_unregister(&kvm_sysdev_class);
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
        on_each_cpu(hardware_disable, NULL, 1);
        kvm_arch_hardware_unsetup();
        kvm_arch_exit();
        free_cpumask_var(cpus_hardware_enabled);
        __free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);