/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *	Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "vmx.h"
#include "mmu.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>

/*
 * Setting this variable to true enables Two-Dimensional Paging, where
 * the hardware walks 2 page tables:
 * 1. the guest-virtual to guest-physical
 * 2. while doing 1. it walks guest-physical to host-physical
 * If the hardware supports that we don't need to do shadow paging.
 */
bool tdp_enabled = false;

#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 0;
module_param(dbg, bool, 0644);
#endif

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
#endif

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
			| PT64_NX_MASK)

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

struct kvm_pv_mmu_op_buffer {
	void *ptr;
	unsigned len;
	unsigned processed;
	char buf[512] __aligned(sizeof(long));
};

struct kvm_rmap_desc {
	u64 *shadow_ptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;
static u64 __read_mostly shadow_base_present_pte;
static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask;	/* mutually exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

void kvm_mmu_set_base_ptes(u64 base_pte)
{
	shadow_base_present_pte = base_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);

void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask)
{
	shadow_user_mask = user_mask;
	shadow_accessed_mask = accessed_mask;
	shadow_dirty_mask = dirty_mask;
	shadow_nx_mask = nx_mask;
	shadow_x_mask = x_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shadow_efer & EFER_NX;
}

static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static int is_shadow_present_pte(u64 pte)
{
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static int is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_dirty_pte(unsigned long pte)
{
	return pte & shadow_dirty_mask;
}

static int is_rmap_pte(u64 pte)
{
	return is_shadow_present_pte(pte);
}

static pfn_t spte_to_pfn(u64 pte)
{
	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

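/*
 * PSE-36 tucks the high bits of a 4MB page's frame address into the
 * middle of the pde; this returns those bits shifted into gfn position
 * so callers can add them to the gfn taken from the pde's low bits.
 * (Sketch of the arithmetic, assuming PAGE_SHIFT == 12 and
 * PT32_DIR_PSE36_SHIFT == 13: shift = 32 - 13 - 12 = 7, so pde bit 13
 * lands at gfn bit 20, i.e. physical address bit 32.)
 */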
static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

static void set_shadow_pte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
	set_64bit((unsigned long *)sptep, spte);
#else
	set_64bit((unsigned long long *)sptep, spte);
#endif
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	struct page *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		set_page_private(page, 0);
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

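/*
 * The page fault paths allocate while holding mmu_lock, a spinlock, so
 * they must not sleep. Instead, every object they might need is
 * pre-allocated here, where sleeping is still allowed, and later taken
 * from the caches via mmu_memory_cache_alloc().
 */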
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
				   pte_chain_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
				   rmap_desc_cache, 1);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	memset(p, 0, size);
	return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
	kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
	kfree(rd);
}

/*
 * Return the pointer to the largepage write count for a given
 * gfn, handling slots that are not large page aligned.
 */
static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
{
	unsigned long idx;

	idx = (gfn / KVM_PAGES_PER_HPAGE) -
	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);
	return &slot->lpage_info[idx].write_count;
}

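/*
 * Record that a gfn's large page frame contains a shadowed (i.e.
 * write-protected) page by raising the frame's write count; a nonzero
 * count makes has_wrprotected_page() true and keeps the range from
 * being mapped by a large spte.
 */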
static void account_shadowed(struct kvm *kvm, gfn_t gfn)
{
	int *write_count;

	write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
	*write_count += 1;
}

static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
{
	int *write_count;

	write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
	*write_count -= 1;
	WARN_ON(*write_count < 0);
}

static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
	int *largepage_idx;

	if (slot) {
		largepage_idx = slot_largepage_idx(gfn, slot);
		return *largepage_idx;
	}

	return 1;
}

static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return 0;

	vma = find_vma(current->mm, addr);
	if (vma && is_vm_hugetlb_page(vma))
		return 1;

	return 0;
}

static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
	struct kvm_memory_slot *slot;

	if (has_wrprotected_page(vcpu->kvm, large_gfn))
		return 0;

	if (!host_largepage_backed(vcpu->kvm, large_gfn))
		return 0;

	slot = gfn_to_memslot(vcpu->kvm, large_gfn);
	if (slot && slot->dirty_bitmap)
		return 0;

	return 1;
}

/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called
 */

static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
{
	struct kvm_memory_slot *slot;
	unsigned long idx;

	slot = gfn_to_memslot(kvm, gfn);
	if (!lpage)
		return &slot->rmap[gfn - slot->base_gfn];

	idx = (gfn / KVM_PAGES_PER_HPAGE) -
	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);

	return &slot->lpage_info[idx].rmap_pde;
}

/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
 * that points to page_address(page).
 *
 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
 * containing more mappings.
 */
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
{
	struct kvm_mmu_page *sp;
	struct kvm_rmap_desc *desc;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	gfn = unalias_gfn(vcpu->kvm, gfn);
	sp = page_header(__pa(spte));
	sp->gfns[spte - sp->spt] = gfn;
	rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
	if (!*rmapp) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		*rmapp = (unsigned long)spte;
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->shadow_ptes[0] = (u64 *)*rmapp;
		desc->shadow_ptes[1] = spte;
		*rmapp = (unsigned long)desc | 1;
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
			desc = desc->more;
		if (desc->shadow_ptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->shadow_ptes[i]; ++i)
			;
		desc->shadow_ptes[i] = spte;
	}
}

static void rmap_desc_remove_entry(unsigned long *rmapp,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
		;
	desc->shadow_ptes[i] = desc->shadow_ptes[j];
	desc->shadow_ptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		*rmapp = (unsigned long)desc->shadow_ptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			*rmapp = (unsigned long)desc->more | 1;
	mmu_free_rmap_desc(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	struct kvm_mmu_page *sp;
	pfn_t pfn;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	sp = page_header(__pa(spte));
	pfn = spte_to_pfn(*spte);
	if (*spte & shadow_accessed_mask)
		kvm_set_pfn_accessed(pfn);
	if (is_writeble_pte(*spte))
		kvm_release_pfn_dirty(pfn);
	else
		kvm_release_pfn_clean(pfn);
	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
	if (!*rmapp) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
		if ((u64 *)*rmapp != spte) {
			printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		*rmapp = 0;
	} else {
		rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
				if (desc->shadow_ptes[i] == spte) {
					rmap_desc_remove_entry(rmapp,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		BUG();
	}
}

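/*
 * Iterate over the sptes in an rmap chain: pass spte == NULL to fetch
 * the first spte, or the previous return value to fetch the next one;
 * returns NULL once the chain is exhausted.
 */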
static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	u64 *prev_spte;
	int i;

	if (!*rmapp)
		return NULL;
	else if (!(*rmapp & 1)) {
		if (!spte)
			return (u64 *)*rmapp;
		return NULL;
	}
	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
	prev_desc = NULL;
	prev_spte = NULL;
	while (desc) {
		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
			if (prev_spte == spte)
				return desc->shadow_ptes[i];
			prev_spte = desc->shadow_ptes[i];
		}
		desc = desc->more;
	}
	return NULL;
}

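/*
 * Remove write access from every spte mapping gfn (large sptes are
 * dropped entirely), flush remote TLBs if anything changed, and mark
 * the gfn as shadowed via account_shadowed().
 */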
static void rmap_write_protect(struct kvm *kvm, u64 gfn)
{
	unsigned long *rmapp;
	u64 *spte;
	int write_protected = 0;

	gfn = unalias_gfn(kvm, gfn);
	rmapp = gfn_to_rmap(kvm, gfn, 0);

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		if (is_writeble_pte(*spte)) {
			set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
			write_protected = 1;
		}
		spte = rmap_next(kvm, rmapp, spte);
	}
	if (write_protected) {
		pfn_t pfn;

		spte = rmap_next(kvm, rmapp, NULL);
		pfn = spte_to_pfn(*spte);
		kvm_set_pfn_dirty(pfn);
	}

	/* check for huge page mappings */
	rmapp = gfn_to_rmap(kvm, gfn, 1);
	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
		pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
		if (is_writeble_pte(*spte)) {
			rmap_remove(kvm, spte);
			--kvm->stat.lpages;
			set_shadow_pte(spte, shadow_trap_nonpresent_pte);
			spte = NULL;
			write_protected = 1;
		}
		spte = rmap_next(kvm, rmapp, spte);
	}

	if (write_protected)
		kvm_flush_remote_tlbs(kvm);

	account_shadowed(kvm, gfn);
}

static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
{
	u64 *spte;
	int need_tlb_flush = 0;

	while ((spte = rmap_next(kvm, rmapp, NULL))) {
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
		rmap_remove(kvm, spte);
		set_shadow_pte(spte, shadow_trap_nonpresent_pte);
		need_tlb_flush = 1;
	}
	return need_tlb_flush;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  int (*handler)(struct kvm *kvm, unsigned long *rmapp))
{
	int i;
	int retval = 0;

	/*
	 * If mmap_sem isn't taken, we can look at the memslots with only
	 * the mmu_lock by skipping over the slots with userspace_addr == 0.
	 */
	for (i = 0; i < kvm->nmemslots; i++) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];
		unsigned long start = memslot->userspace_addr;
		unsigned long end;

		/* mmu_lock protects userspace_addr */
		if (!start)
			continue;

		end = start + (memslot->npages << PAGE_SHIFT);
		if (hva >= start && hva < end) {
			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
			retval |= handler(kvm, &memslot->rmap[gfn_offset]);
			retval |= handler(kvm,
					  &memslot->lpage_info[
						  gfn_offset /
						  KVM_PAGES_PER_HPAGE].rmap_pde);
		}
	}

	return retval;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}

static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
{
	u64 *spte;
	int young = 0;

	/* always return old for EPT */
	if (!shadow_accessed_mask)
		return 0;

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		int _young;
		u64 _spte = *spte;
		BUG_ON(!(_spte & PT_PRESENT_MASK));
		_young = _spte & PT_ACCESSED_MASK;
		if (_young) {
			young = 1;
			clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
		}
		spte = rmap_next(kvm, rmapp, spte);
	}
	return young;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if (is_shadow_present_pte(*pos)) {
			printk(KERN_ERR "%s: %p %llx\n", __func__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	ASSERT(is_empty_shadow_page(sp->spt));
	list_del(&sp->link);
	__free_page(virt_to_page(sp->spt));
	__free_page(virt_to_page(sp->gfns));
	kfree(sp);
	++kvm->arch.n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte)
{
	struct kvm_mmu_page *sp;

	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	ASSERT(is_empty_shadow_page(sp->spt));
	sp->slot_bitmap = 0;
	sp->multimapped = 0;
	sp->parent_pte = parent_pte;
	--vcpu->kvm->arch.n_free_mmu_pages;
	return sp;
}

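/*
 * A shadow page can be pointed to by more than one parent spte. The
 * parent pointer is kept inline in sp->parent_pte while there is a
 * single parent; once a second one appears, sp->multimapped is set and
 * the parents move to a list of kvm_pte_chain blocks.
 */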
static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!sp->multimapped) {
		u64 *old = sp->parent_pte;

		if (!old) {
			sp->parent_pte = parent_pte;
			return;
		}
		sp->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&sp->parent_ptes);
		hlist_add_head(&pte_chain->link, &sp->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	pte_chain = mmu_alloc_pte_chain(vcpu);
	BUG_ON(!pte_chain);
	hlist_add_head(&pte_chain->link, &sp->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!sp->multimapped) {
		BUG_ON(sp->parent_pte != parent_pte);
		sp->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
				&& pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(pte_chain);
				if (hlist_empty(&sp->parent_ptes)) {
					sp->multimapped = 0;
					sp->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();
}

static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		sp->spt[i] = shadow_trap_nonpresent_pte;
}

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.metaphysical
		    && !sp->role.invalid) {
			pgprintk("%s: found role %x\n",
				 __func__, sp->role.word);
			return sp;
		}
	return NULL;
}

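/*
 * Find the shadow page for (gfn, role) in the hash table, attaching
 * parent_pte if it already exists; otherwise allocate a new one,
 * write-protect the guest page it shadows (unless metaphysical) and
 * prefetch its sptes.
 */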
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int metaphysical,
					     unsigned access,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;

	role.word = 0;
	role.glevels = vcpu->arch.mmu.root_level;
	role.level = level;
	role.metaphysical = metaphysical;
	role.access = access;
	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	pgprintk("%s: looking gfn %lx role %x\n", __func__,
		 gfn, role.word);
	index = kvm_page_table_hashfn(gfn);
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && sp->role.word == role.word) {
			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
			pgprintk("%s: found\n", __func__);
			return sp;
		}
	++vcpu->kvm->stat.mmu_cache_miss;
	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!sp)
		return sp;
	pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link, bucket);
	if (!metaphysical)
		rmap_write_protect(vcpu->kvm, gfn);
	if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
		vcpu->arch.mmu.prefetch_page(vcpu, sp);
	else
		nonpaging_prefetch_page(vcpu, sp);
	return sp;
}

static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *sp)
{
	unsigned i;
	u64 *pt;
	u64 ent;

	pt = sp->spt;

	if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (is_shadow_present_pte(pt[i]))
				rmap_remove(kvm, &pt[i]);
			pt[i] = shadow_trap_nonpresent_pte;
		}
		kvm_flush_remote_tlbs(kvm);
		return;
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		if (is_shadow_present_pte(ent)) {
			if (!is_large_pte(ent)) {
				ent &= PT64_BASE_ADDR_MASK;
				mmu_page_remove_parent_pte(page_header(ent),
							   &pt[i]);
			} else {
				--kvm->stat.lpages;
				rmap_remove(kvm, &pt[i]);
			}
		}
		pt[i] = shadow_trap_nonpresent_pte;
	}
	kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
}

static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm->vcpus[i]->arch.last_pte_updated = NULL;
}

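/*
 * Unlink a shadow page from its parent sptes and its children and
 * release it. A page still in use as a root (sp->root_count != 0)
 * cannot be freed yet: it is marked invalid and the remote mmus are
 * reloaded so the vcpus drop it.
 */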
static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 *parent_pte;

	++kvm->stat.mmu_shadow_zapped;
	while (sp->multimapped || sp->parent_pte) {
		if (!sp->multimapped)
			parent_pte = sp->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(sp->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		BUG_ON(!parent_pte);
		kvm_mmu_put_page(sp, parent_pte);
		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
	}
	kvm_mmu_page_unlink_children(kvm, sp);
	if (!sp->root_count) {
		if (!sp->role.metaphysical && !sp->role.invalid)
			unaccount_shadowed(kvm, sp->gfn);
		hlist_del(&sp->hash_link);
		kvm_mmu_free_page(kvm, sp);
	} else {
		int invalid = sp->role.invalid;
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
		sp->role.invalid = 1;
		kvm_reload_remote_mmus(kvm);
		if (!sp->role.metaphysical && !invalid)
			unaccount_shadowed(kvm, sp->gfn);
	}
	kvm_mmu_reset_last_pte_updated(kvm);
}

/*
 * Changing the number of mmu pages allocated to the vm
 * Note: if kvm_nr_mmu_pages is too small, you will get deadlock
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
	/*
	 * If we set the number of mmu pages to be smaller than the
	 * number of active pages, we must free some mmu pages before we
	 * change the value
	 */

	if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
	    kvm_nr_mmu_pages) {
		int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
				       - kvm->arch.n_free_mmu_pages;

		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
			struct kvm_mmu_page *page;

			page = container_of(kvm->arch.active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			kvm_mmu_zap_page(kvm, page);
			n_used_mmu_pages--;
		}
		kvm->arch.n_free_mmu_pages = 0;
	}
	else
		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
					 - kvm->arch.n_alloc_mmu_pages;

	kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
}

static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
	r = 0;
	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.metaphysical) {
			pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
				 sp->role.word);
			kvm_mmu_zap_page(kvm, sp);
			r = 1;
		}
	return r;
}

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;

	while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
		pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word);
		kvm_mmu_zap_page(kvm, sp);
	}
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
	struct kvm_mmu_page *sp = page_header(__pa(pte));

	__set_bit(slot, &sp->slot_bitmap);
}

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct page *page;

	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return NULL;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	up_read(&current->mm->mmap_sem);

	return page;
}

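/*
 * Build and install a shadow pte, handling the overwrite of an
 * existing mapping, write protection of shadowed guest page tables
 * (possibly demoting the access to read-only and reporting an emulated
 * page table write through *ptwrite), dirty tracking and rmap upkeep.
 */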
static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
			 unsigned pt_access, unsigned pte_access,
			 int user_fault, int write_fault, int dirty,
			 int *ptwrite, int largepage, gfn_t gfn,
			 pfn_t pfn, bool speculative)
{
	u64 spte;
	int was_rmapped = 0;
	int was_writeble = is_writeble_pte(*shadow_pte);

	pgprintk("%s: spte %llx access %x write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __func__, *shadow_pte, pt_access,
		 write_fault, user_fault, gfn);

	if (is_rmap_pte(*shadow_pte)) {
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
		if (largepage && !is_large_pte(*shadow_pte)) {
			struct kvm_mmu_page *child;
			u64 pte = *shadow_pte;

			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, shadow_pte);
		} else if (pfn != spte_to_pfn(*shadow_pte)) {
			pgprintk("hfn old %lx new %lx\n",
				 spte_to_pfn(*shadow_pte), pfn);
			rmap_remove(vcpu->kvm, shadow_pte);
		} else {
			if (largepage)
				was_rmapped = is_large_pte(*shadow_pte);
			else
				was_rmapped = 1;
		}
	}

	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = shadow_base_present_pte | shadow_dirty_mask;
	if (!speculative)
		pte_access |= PT_ACCESSED_MASK;
	if (!dirty)
		pte_access &= ~ACC_WRITE_MASK;
	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;
	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;
	if (largepage)
		spte |= PT_PAGE_SIZE_MASK;

	spte |= (u64)pfn << PAGE_SHIFT;

	if ((pte_access & ACC_WRITE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
		struct kvm_mmu_page *shadow;

		spte |= PT_WRITABLE_MASK;

		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
		if (shadow ||
		   (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __func__, gfn);
			pte_access &= ~ACC_WRITE_MASK;
			if (is_writeble_pte(spte)) {
				spte &= ~PT_WRITABLE_MASK;
				kvm_x86_ops->tlb_flush(vcpu);
			}
			if (write_fault)
				*ptwrite = 1;
		}
	}

	if (pte_access & ACC_WRITE_MASK)
		mark_page_dirty(vcpu->kvm, gfn);

	pgprintk("%s: setting spte %llx\n", __func__, spte);
	pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
		 (spte&PT_PAGE_SIZE_MASK)? "2MB" : "4kB",
		 (spte&PT_WRITABLE_MASK)?"RW":"R", gfn, spte, shadow_pte);
	set_shadow_pte(shadow_pte, spte);
	if (!was_rmapped && (spte & PT_PAGE_SIZE_MASK)
	    && (spte & PT_PRESENT_MASK))
		++vcpu->kvm->stat.lpages;

	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
	if (!was_rmapped) {
		rmap_add(vcpu, shadow_pte, gfn, largepage);
		if (!is_rmap_pte(*shadow_pte))
			kvm_release_pfn_clean(pfn);
	} else {
		if (was_writeble)
			kvm_release_pfn_dirty(pfn);
		else
			kvm_release_pfn_clean(pfn);
	}
	if (speculative) {
		vcpu->arch.last_pte_updated = shadow_pte;
		vcpu->arch.last_pte_gfn = gfn;
	}
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

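/*
 * Map a guest page directly: walk down from the shadow root,
 * allocating intermediate shadow pages as needed, and install pfn at
 * the leaf: level 1 normally, or level 2 when largepage is set.
 */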
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
			int largepage, gfn_t gfn, pfn_t pfn,
			int level)
{
	hpa_t table_addr = vcpu->arch.mmu.root_hpa;
	int pt_write = 0;

	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;

		ASSERT(VALID_PAGE(table_addr));
		table = __va(table_addr);

		if (level == 1) {
			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
				     0, write, 1, &pt_write, 0, gfn, pfn, false);
			return pt_write;
		}

		if (largepage && level == 2) {
			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
				     0, write, 1, &pt_write, 1, gfn, pfn, false);
			return pt_write;
		}

		if (table[index] == shadow_trap_nonpresent_pte) {
			struct kvm_mmu_page *new_table;
			gfn_t pseudo_gfn;

			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
						     v, level - 1,
						     1, ACC_ALL, &table[index]);
			if (!new_table) {
				pgprintk("nonpaging_map: ENOMEM\n");
				kvm_release_pfn_clean(pfn);
				return -ENOMEM;
			}

			set_shadow_pte(&table[index],
				       __pa(new_table->spt)
				       | PT_PRESENT_MASK | PT_WRITABLE_MASK
				       | shadow_user_mask | shadow_x_mask);
		}
		table_addr = table[index] & PT64_BASE_ADDR_MASK;
	}
}

static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
{
	int r;
	int largepage = 0;
	pfn_t pfn;
	unsigned long mmu_seq;

	down_read(&current->mm->mmap_sem);
	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
		largepage = 1;
	}

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/* implicit mb(), we'll read before PT lock is unlocked */
	pfn = gfn_to_pfn(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);

	/* mmio */
	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return 1;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, v, write, largepage, gfn, pfn,
			 PT32E_ROOT_LEVEL);
	spin_unlock(&vcpu->kvm->mmu_lock);


	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}


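/*
 * Drop the references the vcpu holds on its root shadow pages: the
 * single 64-bit root, or the four PAE directory roots. Roots already
 * marked invalid are zapped once their root count reaches zero.
 */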
static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	spin_lock(&vcpu->kvm->mmu_lock);
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		--sp->root_count;
		if (!sp->root_count && sp->role.invalid)
			kvm_mmu_zap_page(vcpu->kvm, sp);
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
		spin_unlock(&vcpu->kvm->mmu_lock);
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			--sp->root_count;
			if (!sp->root_count && sp->role.invalid)
				kvm_mmu_zap_page(vcpu->kvm, sp);
		}
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
}

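/*
 * Allocate shadow roots for the current paging mode: one PT64 root if
 * the shadow level is 4, otherwise four PAE roots, one per pdptr. With
 * tdp enabled the roots are metaphysical, i.e. they shadow no guest
 * page table.
 */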
static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *sp;
	int metaphysical = 0;

	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;

	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		if (tdp_enabled)
			metaphysical = 1;
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
				      PT64_ROOT_LEVEL, metaphysical,
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.root_hpa = root;
		return;
	}
	metaphysical = !is_paging(vcpu);
	if (tdp_enabled)
		metaphysical = 1;
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
			if (!is_present_pte(vcpu->arch.pdptrs[i])) {
				vcpu->arch.mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
		} else if (vcpu->arch.mmu.root_level == 0)
			root_gfn = 0;
		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
				      PT32_ROOT_LEVEL, metaphysical,
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
	gfn_t gfn;
	int r;

	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	gfn = gva >> PAGE_SHIFT;

	return nonpaging_map(vcpu, gva & PAGE_MASK,
			     error_code & PFERR_WRITE_MASK, gfn);
}

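/*
 * Page fault handler for two-dimensional paging: resolve gpa to a host
 * pfn (possibly large page backed) and map it with __direct_map(),
 * retrying if an mmu notifier invalidation raced with the lookup.
 */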
static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
			  u32 error_code)
{
	pfn_t pfn;
	int r;
	int largepage = 0;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	unsigned long mmu_seq;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	down_read(&current->mm->mmap_sem);
	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
		largepage = 1;
	}
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/* implicit mb(), we'll read before PT lock is unlocked */
	pfn = gfn_to_pfn(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);
	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return 1;
	}
	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
			 largepage, gfn, pfn, kvm_x86_ops->get_tdp_level());
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
	mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

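/*
 * paging_tmpl.h is included twice to stamp out the guest page table
 * walker for both formats: PTTYPE 64 generates the paging64_*
 * functions, PTTYPE 32 the paging32_* ones.
 */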
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->prefetch_page = paging64_prefetch_page;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->prefetch_page = paging32_prefetch_page;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = tdp_page_fault;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
	context->root_hpa = INVALID_PAGE;

	if (!is_paging(vcpu)) {
		context->gva_to_gpa = nonpaging_gva_to_gpa;
		context->root_level = 0;
	} else if (is_long_mode(vcpu)) {
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT64_ROOT_LEVEL;
	} else if (is_pae(vcpu)) {
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT32E_ROOT_LEVEL;
	} else {
		context->gva_to_gpa = paging32_gva_to_gpa;
		context->root_level = PT32_ROOT_LEVEL;
	}

	return 0;
}

static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	if (!is_paging(vcpu))
		return nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
		return paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		return paging32E_init_context(vcpu);
	else
		return paging32_init_context(vcpu);
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	vcpu->arch.update_pte.pfn = bad_pfn;

	if (tdp_enabled)
		return init_kvm_tdp_mmu(vcpu);
	else
		return init_kvm_softmmu(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
		vcpu->arch.mmu.free(vcpu);
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	}
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	mmu_alloc_roots(vcpu);
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
	kvm_mmu_flush_tlb(vcpu);
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
		    is_large_pte(pte))
			rmap_remove(vcpu->kvm, spte);
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, spte);
		}
	}
	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
	if (is_large_pte(pte))
		--vcpu->kvm->stat.lpages;
}

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte,
				  const void *new)
{
	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
		if (!vcpu->arch.update_pte.largepage ||
		    sp->role.glevels == PT32_ROOT_LEVEL) {
			++vcpu->kvm->stat.mmu_pde_zapped;
			return;
		}
	}

	++vcpu->kvm->stat.mmu_pte_updated;
	if (sp->role.glevels == PT32_ROOT_LEVEL)
		paging32_update_pte(vcpu, sp, spte, new);
	else
		paging64_update_pte(vcpu, sp, spte, new);
}

static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= PT64_NX_MASK;
	new ^= PT64_NX_MASK;
	return (old & ~new & PT64_PERM_MASK) != 0;
}

static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
{
	if (need_remote_flush(old, new))
		kvm_flush_remote_tlbs(vcpu->kvm);
	else
		kvm_mmu_flush_tlb(vcpu);
}

static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
	u64 *spte = vcpu->arch.last_pte_updated;

	return !!(spte && (*spte & shadow_accessed_mask));
}

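/*
 * On an emulated guest pte write, speculatively resolve the pfn the
 * new pte points at while sleeping is still allowed, so that
 * mmu_pte_write_new_pte() can install it later under mmu_lock.
 */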
static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
					  const u8 *new, int bytes)
{
	gfn_t gfn;
	int r;
	u64 gpte = 0;
	pfn_t pfn;

	vcpu->arch.update_pte.largepage = 0;

	if (bytes != 4 && bytes != 8)
		return;

	/*
	 * Assume that the pte write on a page table is of the same type
	 * as the current vcpu paging mode. This is nearly always true
	 * (might be false while changing modes). Note it is verified later
	 * by update_pte().
	 */
	if (is_pae(vcpu)) {
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		if ((bytes == 4) && (gpa % 4 == 0)) {
			r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
			if (r)
				return;
			memcpy((void *)&gpte + (gpa % 8), new, 4);
		} else if ((bytes == 8) && (gpa % 8 == 0)) {
			memcpy((void *)&gpte, new, 8);
		}
	} else {
		if ((bytes == 4) && (gpa % 4 == 0))
			memcpy((void *)&gpte, new, 4);
	}
	if (!is_present_pte(gpte))
		return;
	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;

	down_read(&current->mm->mmap_sem);
	if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
		vcpu->arch.update_pte.largepage = 1;
	}
	vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/* implicit mb(), we'll read before PT lock is unlocked */
	pfn = gfn_to_pfn(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);

	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return;
	}
	vcpu->arch.update_pte.gfn = gfn;
	vcpu->arch.update_pte.pfn = pfn;
}

static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u64 *spte = vcpu->arch.last_pte_updated;

	if (spte
	    && vcpu->arch.last_pte_gfn == gfn
	    && shadow_accessed_mask
	    && !(*spte & shadow_accessed_mask)
	    && is_shadow_present_pte(*spte))
		set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
}

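/*
 * Emulated write to a guest page that is shadowed as a page table.
 * Either update the affected sptes in place or, when the write is
 * misaligned or the page looks flooded with writes (and so is probably
 * no longer used as a page table), zap the whole shadow page.
 */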
09072daf 1800void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
fe551881 1801 const u8 *new, int bytes)
da4a00f0 1802{
9b7a0325 1803 gfn_t gfn = gpa >> PAGE_SHIFT;
4db35314 1804 struct kvm_mmu_page *sp;
0e7bc4b9 1805 struct hlist_node *node, *n;
9b7a0325
AK
1806 struct hlist_head *bucket;
1807 unsigned index;
489f1d65 1808 u64 entry, gentry;
9b7a0325 1809 u64 *spte;
9b7a0325 1810 unsigned offset = offset_in_page(gpa);
0e7bc4b9 1811 unsigned pte_size;
9b7a0325 1812 unsigned page_offset;
0e7bc4b9 1813 unsigned misaligned;
fce0657f 1814 unsigned quadrant;
9b7a0325 1815 int level;
86a5ba02 1816 int flooded = 0;
ac1b714e 1817 int npte;
489f1d65 1818 int r;
9b7a0325 1819
b8688d51 1820 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
d7824fff 1821 mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
aaee2c94 1822 spin_lock(&vcpu->kvm->mmu_lock);
1b7fcd32 1823 kvm_mmu_access_page(vcpu, gfn);
eb787d10 1824 kvm_mmu_free_some_pages(vcpu);
4cee5764 1825 ++vcpu->kvm->stat.mmu_pte_write;
c7addb90 1826 kvm_mmu_audit(vcpu, "pre pte write");
ad312c7c 1827 if (gfn == vcpu->arch.last_pt_write_gfn
12b7d28f 1828 && !last_updated_pte_accessed(vcpu)) {
ad312c7c
ZX
1829 ++vcpu->arch.last_pt_write_count;
1830 if (vcpu->arch.last_pt_write_count >= 3)
86a5ba02
AK
1831 flooded = 1;
1832 } else {
ad312c7c
ZX
1833 vcpu->arch.last_pt_write_gfn = gfn;
1834 vcpu->arch.last_pt_write_count = 1;
1835 vcpu->arch.last_pte_updated = NULL;
86a5ba02 1836 }
1ae0a13d 1837 index = kvm_page_table_hashfn(gfn);
f05e70ac 1838 bucket = &vcpu->kvm->arch.mmu_page_hash[index];
4db35314
AK
1839 hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
1840 if (sp->gfn != gfn || sp->role.metaphysical)
9b7a0325 1841 continue;
4db35314 1842 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
0e7bc4b9 1843 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
e925c5ba 1844 misaligned |= bytes < 4;
86a5ba02 1845 if (misaligned || flooded) {
0e7bc4b9
AK
1846 /*
1847 * Misaligned accesses are too much trouble to fix
1848 * up; also, they usually indicate a page is not used
1849 * as a page table.
86a5ba02
AK
1850 *
1851 * If we're seeing too many writes to a page,
1852 * it may no longer be a page table, or we may be
1853 * forking, in which case it is better to unmap the
1854 * page.
0e7bc4b9
AK
1855 */
1856 pgprintk("misaligned: gpa %llx bytes %d role %x\n",
4db35314
AK
1857 gpa, bytes, sp->role.word);
1858 kvm_mmu_zap_page(vcpu->kvm, sp);
4cee5764 1859 ++vcpu->kvm->stat.mmu_flooded;
0e7bc4b9
AK
1860 continue;
1861 }
9b7a0325 1862 page_offset = offset;
4db35314 1863 level = sp->role.level;
ac1b714e 1864 npte = 1;
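/*
 * 4-byte guest ptes are shadowed by 8-byte sptes, so one 32-bit
 * guest page table spans two shadow pages (four for the root
 * directory, whose offset is doubled again below); sp->role.quadrant
 * records which portion this sp shadows, and writes landing in a
 * different quadrant are skipped.
 */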
4db35314 1865 if (sp->role.glevels == PT32_ROOT_LEVEL) {
ac1b714e
AK
1866 page_offset <<= 1; /* 32->64 */
1867 /*
1868 * A 32-bit pde maps 4MB while the shadow pdes map
1869 * only 2MB. So we need to double the offset again
1870 * and zap two pdes instead of one.
1871 */
1872 if (level == PT32_ROOT_LEVEL) {
6b8d0f9b 1873 page_offset &= ~7; /* kill rounding error */
ac1b714e
AK
1874 page_offset <<= 1;
1875 npte = 2;
1876 }
fce0657f 1877 quadrant = page_offset >> PAGE_SHIFT;
9b7a0325 1878 page_offset &= ~PAGE_MASK;
4db35314 1879 if (quadrant != sp->role.quadrant)
fce0657f 1880 continue;
9b7a0325 1881 }
4db35314 1882 spte = &sp->spt[page_offset / sizeof(*spte)];
489f1d65
DE
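/*
 * If the write did not replace a whole aligned pte, re-read the
 * full current gpte so the speculative update below operates on a
 * complete value; if the read fails we only zap, never update.
 */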
1883 if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
1884 gentry = 0;
1885 r = kvm_read_guest_atomic(vcpu->kvm,
1886 gpa & ~(u64)(pte_size - 1),
1887 &gentry, pte_size);
1888 new = (const void *)&gentry;
1889 if (r < 0)
1890 new = NULL;
1891 }
ac1b714e 1892 while (npte--) {
79539cec 1893 entry = *spte;
4db35314 1894 mmu_pte_write_zap_pte(vcpu, sp, spte);
489f1d65
DE
1895 if (new)
1896 mmu_pte_write_new_pte(vcpu, sp, spte, new);
79539cec 1897 mmu_pte_write_flush_tlb(vcpu, entry, *spte);
ac1b714e 1898 ++spte;
9b7a0325 1899 }
9b7a0325 1900 }
c7addb90 1901 kvm_mmu_audit(vcpu, "post pte write");
aaee2c94 1902 spin_unlock(&vcpu->kvm->mmu_lock);
35149e21
AL
1903 if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
1904 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
1905 vcpu->arch.update_pte.pfn = bad_pfn;
d7824fff 1906 }
da4a00f0
AK
1907}
1908
a436036b
AK
1909int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
1910{
10589a46
MT
1911 gpa_t gpa;
1912 int r;
a436036b 1913
10589a46 1914 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
10589a46 1915
aaee2c94 1916 spin_lock(&vcpu->kvm->mmu_lock);
10589a46 1917 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
aaee2c94 1918 spin_unlock(&vcpu->kvm->mmu_lock);
10589a46 1919 return r;
a436036b 1920}
577bdc49 1921EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
a436036b 1922
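/*
 * Keep at least KVM_REFILL_PAGES shadow pages free by zapping from
 * the tail of active_mmu_pages, i.e. the oldest shadow pages.
 */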
22d95b12 1923void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
ebeace86 1924{
f05e70ac 1925 while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
4db35314 1926 struct kvm_mmu_page *sp;
ebeace86 1927
f05e70ac 1928 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
4db35314
AK
1929 struct kvm_mmu_page, link);
1930 kvm_mmu_zap_page(vcpu->kvm, sp);
4cee5764 1931 ++vcpu->kvm->stat.mmu_recycled;
ebeace86
AK
1932 }
1933}
ebeace86 1934
3067714c
AK
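/*
 * Fault-path entry point. The mode-specific ->page_fault() handler
 * returns a negative errno (propagated), 0 when it fixed the fault
 * in the shadow tables (resume the guest), or a positive value when
 * the access must be emulated, e.g. mmio or a write to a shadowed
 * guest page table.
 */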
1935int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
1936{
1937 int r;
1938 enum emulation_result er;
1939
ad312c7c 1940 r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
3067714c
AK
1941 if (r < 0)
1942 goto out;
1943
1944 if (!r) {
1945 r = 1;
1946 goto out;
1947 }
1948
b733bfb5
AK
1949 r = mmu_topup_memory_caches(vcpu);
1950 if (r)
1951 goto out;
1952
3067714c 1953 er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
3067714c
AK
1954
1955 switch (er) {
1956 case EMULATE_DONE:
1957 return 1;
1958 case EMULATE_DO_MMIO:
1959 ++vcpu->stat.mmio_exits;
1960 return 0;
1961 case EMULATE_FAIL:
1962 kvm_report_emulation_failure(vcpu, "pagetable");
1963 return 1;
1964 default:
1965 BUG();
1966 }
1967out:
3067714c
AK
1968 return r;
1969}
1970EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
1971
18552672
JR
1972void kvm_enable_tdp(void)
1973{
1974 tdp_enabled = true;
1975}
1976EXPORT_SYMBOL_GPL(kvm_enable_tdp);
1977
5f4cb662
JR
1978void kvm_disable_tdp(void)
1979{
1980 tdp_enabled = false;
1981}
1982EXPORT_SYMBOL_GPL(kvm_disable_tdp);
1983
6aa8b732
AK
1984static void free_mmu_pages(struct kvm_vcpu *vcpu)
1985{
4db35314 1986 struct kvm_mmu_page *sp;
6aa8b732 1987
f05e70ac
ZX
1988 while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
1989 sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
4db35314
AK
1990 struct kvm_mmu_page, link);
1991 kvm_mmu_zap_page(vcpu->kvm, sp);
8d2d73b9 1992 cond_resched();
f51234c2 1993 }
ad312c7c 1994 free_page((unsigned long)vcpu->arch.mmu.pae_root);
6aa8b732
AK
1995}
1996
1997static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
1998{
17ac10ad 1999 struct page *page;
6aa8b732
AK
2000 int i;
2001
2002 ASSERT(vcpu);
2003
f05e70ac
ZX
2004 if (vcpu->kvm->arch.n_requested_mmu_pages)
2005 vcpu->kvm->arch.n_free_mmu_pages =
2006 vcpu->kvm->arch.n_requested_mmu_pages;
82ce2c96 2007 else
f05e70ac
ZX
2008 vcpu->kvm->arch.n_free_mmu_pages =
2009 vcpu->kvm->arch.n_alloc_mmu_pages;
17ac10ad
AK
2010 /*
2011 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
2012 * Therefore we need to allocate shadow page tables in the first
2013 * 4GB of memory, which happens to fit the DMA32 zone.
2014 */
2015 page = alloc_page(GFP_KERNEL | __GFP_DMA32);
2016 if (!page)
2017 goto error_1;
ad312c7c 2018 vcpu->arch.mmu.pae_root = page_address(page);
17ac10ad 2019 for (i = 0; i < 4; ++i)
ad312c7c 2020 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
17ac10ad 2021
6aa8b732
AK
2022 return 0;
2023
2024error_1:
2025 free_mmu_pages(vcpu);
2026 return -ENOMEM;
2027}
2028
8018c27b 2029int kvm_mmu_create(struct kvm_vcpu *vcpu)
6aa8b732 2030{
6aa8b732 2031 ASSERT(vcpu);
ad312c7c 2032 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
6aa8b732 2033
8018c27b
IM
2034 return alloc_mmu_pages(vcpu);
2035}
6aa8b732 2036
8018c27b
IM
2037int kvm_mmu_setup(struct kvm_vcpu *vcpu)
2038{
2039 ASSERT(vcpu);
ad312c7c 2040 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2c264957 2041
8018c27b 2042 return init_kvm_mmu(vcpu);
6aa8b732
AK
2043}
2044
2045void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
2046{
2047 ASSERT(vcpu);
2048
2049 destroy_kvm_mmu(vcpu);
2050 free_mmu_pages(vcpu);
714b93da 2051 mmu_free_memory_caches(vcpu);
6aa8b732
AK
2052}
2053
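/*
 * Strip the write bit from every spte in shadow pages that map the
 * given slot, so the next guest write faults; typically used when
 * dirty logging starts on a memory slot.
 */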
90cb0529 2054void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
6aa8b732 2055{
4db35314 2056 struct kvm_mmu_page *sp;
6aa8b732 2057
f05e70ac 2058 list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
6aa8b732
AK
2059 int i;
2060 u64 *pt;
2061
4db35314 2062 if (!test_bit(slot, &sp->slot_bitmap))
6aa8b732
AK
2063 continue;
2064
4db35314 2065 pt = sp->spt;
6aa8b732
AK
2066 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
2067 /* avoid RMW */
9647c14c 2068 if (pt[i] & PT_WRITABLE_MASK)
6aa8b732 2069 pt[i] &= ~PT_WRITABLE_MASK;
6aa8b732
AK
2070 }
2071}
37a7d8b0 2072
90cb0529 2073void kvm_mmu_zap_all(struct kvm *kvm)
e0fa826f 2074{
4db35314 2075 struct kvm_mmu_page *sp, *node;
e0fa826f 2076
aaee2c94 2077 spin_lock(&kvm->mmu_lock);
f05e70ac 2078 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
4db35314 2079 kvm_mmu_zap_page(kvm, sp);
aaee2c94 2080 spin_unlock(&kvm->mmu_lock);
e0fa826f 2081
90cb0529 2082 kvm_flush_remote_tlbs(kvm);
e0fa826f
DL
2083}
2084
8b2cf73c 2085static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
3ee16c81
IE
2086{
2087 struct kvm_mmu_page *page;
2088
2089 page = container_of(kvm->arch.active_mmu_pages.prev,
2090 struct kvm_mmu_page, link);
2091 kvm_mmu_zap_page(kvm, page);
2092}
2093
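/*
 * Slab-shrinker callback: under global memory pressure, zap one
 * shadow page from the first VM whose locks we can take, rotate
 * that VM to the tail of vm_list for fairness, and report the total
 * number of allocated shadow pages as the cache size.
 */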
2094static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
2095{
2096 struct kvm *kvm;
2097 struct kvm *kvm_freed = NULL;
2098 int cache_count = 0;
2099
2100 spin_lock(&kvm_lock);
2101
2102 list_for_each_entry(kvm, &vm_list, vm_list) {
2103 int npages;
2104
5a4c9288
MT
2105 if (!down_read_trylock(&kvm->slots_lock))
2106 continue;
3ee16c81
IE
2107 spin_lock(&kvm->mmu_lock);
2108 npages = kvm->arch.n_alloc_mmu_pages -
2109 kvm->arch.n_free_mmu_pages;
2110 cache_count += npages;
2111 if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
2112 kvm_mmu_remove_one_alloc_mmu_page(kvm);
2113 cache_count--;
2114 kvm_freed = kvm;
2115 }
2116 nr_to_scan--;
2117
2118 spin_unlock(&kvm->mmu_lock);
5a4c9288 2119 up_read(&kvm->slots_lock);
3ee16c81
IE
2120 }
2121 if (kvm_freed)
2122 list_move_tail(&kvm_freed->vm_list, &vm_list);
2123
2124 spin_unlock(&kvm_lock);
2125
2126 return cache_count;
2127}
2128
2129static struct shrinker mmu_shrinker = {
2130 .shrink = mmu_shrink,
2131 .seeks = DEFAULT_SEEKS * 10,
2132};
2133
2ddfd20e 2134static void mmu_destroy_caches(void)
b5a33a75
AK
2135{
2136 if (pte_chain_cache)
2137 kmem_cache_destroy(pte_chain_cache);
2138 if (rmap_desc_cache)
2139 kmem_cache_destroy(rmap_desc_cache);
d3d25b04
AK
2140 if (mmu_page_header_cache)
2141 kmem_cache_destroy(mmu_page_header_cache);
b5a33a75
AK
2142}
2143
3ee16c81
IE
2144void kvm_mmu_module_exit(void)
2145{
2146 mmu_destroy_caches();
2147 unregister_shrinker(&mmu_shrinker);
2148}
2149
b5a33a75
AK
2150int kvm_mmu_module_init(void)
2151{
2152 pte_chain_cache = kmem_cache_create("kvm_pte_chain",
2153 sizeof(struct kvm_pte_chain),
20c2df83 2154 0, 0, NULL);
b5a33a75
AK
2155 if (!pte_chain_cache)
2156 goto nomem;
2157 rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
2158 sizeof(struct kvm_rmap_desc),
20c2df83 2159 0, 0, NULL);
b5a33a75
AK
2160 if (!rmap_desc_cache)
2161 goto nomem;
2162
d3d25b04
AK
2163 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
2164 sizeof(struct kvm_mmu_page),
20c2df83 2165 0, 0, NULL);
d3d25b04
AK
2166 if (!mmu_page_header_cache)
2167 goto nomem;
2168
3ee16c81
IE
2169 register_shrinker(&mmu_shrinker);
2170
b5a33a75
AK
2171 return 0;
2172
2173nomem:
3ee16c81 2174 mmu_destroy_caches();
b5a33a75
AK
2175 return -ENOMEM;
2176}
2177
3ad82a7e
ZX
2178/*
 2179 * Calculate the number of mmu pages needed for kvm.
2180 */
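/*
 * For example, assuming KVM_PERMILLE_MMU_PAGES is 20, a guest with
 * one million pages in its memslots would be budgeted 20000 shadow
 * pages, bounded below by KVM_MIN_ALLOC_MMU_PAGES.
 */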
2181unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
2182{
2183 int i;
2184 unsigned int nr_mmu_pages;
2185 unsigned int nr_pages = 0;
2186
2187 for (i = 0; i < kvm->nmemslots; i++)
2188 nr_pages += kvm->memslots[i].npages;
2189
2190 nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
2191 nr_mmu_pages = max(nr_mmu_pages,
2192 (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
2193
2194 return nr_mmu_pages;
2195}
2196
2f333bcb
MT
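/*
 * The pv-mmu hypercall hands us a packed buffer of variable-length
 * operation records. peek returns the next record without consuming
 * it (used to inspect the common header); read consumes it and
 * advances ptr/len/processed.
 */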
2197static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2198 unsigned len)
2199{
2200 if (len > buffer->len)
2201 return NULL;
2202 return buffer->ptr;
2203}
2204
2205static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2206 unsigned len)
2207{
2208 void *ret;
2209
2210 ret = pv_mmu_peek_buffer(buffer, len);
2211 if (!ret)
2212 return ret;
2213 buffer->ptr += len;
2214 buffer->len -= len;
2215 buffer->processed += len;
2216 return ret;
2217}
2218
2219static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
2220 gpa_t addr, gpa_t value)
2221{
2222 int bytes = 8;
2223 int r;
2224
2225 if (!is_long_mode(vcpu) && !is_pae(vcpu))
2226 bytes = 4;
2227
2228 r = mmu_topup_memory_caches(vcpu);
2229 if (r)
2230 return r;
2231
3200f405 2232 if (!emulator_write_phys(vcpu, addr, &value, bytes))
2f333bcb
MT
2233 return -EFAULT;
2234
2235 return 1;
2236}
2237
2238static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2239{
2240 kvm_x86_ops->tlb_flush(vcpu);
2241 return 1;
2242}
2243
2244static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
2245{
2246 spin_lock(&vcpu->kvm->mmu_lock);
2247 mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
2248 spin_unlock(&vcpu->kvm->mmu_lock);
2249 return 1;
2250}
2251
2252static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
2253 struct kvm_pv_mmu_op_buffer *buffer)
2254{
2255 struct kvm_mmu_op_header *header;
2256
2257 header = pv_mmu_peek_buffer(buffer, sizeof *header);
2258 if (!header)
2259 return 0;
2260 switch (header->op) {
2261 case KVM_MMU_OP_WRITE_PTE: {
2262 struct kvm_mmu_op_write_pte *wpte;
2263
2264 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
2265 if (!wpte)
2266 return 0;
2267 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
2268 wpte->pte_val);
2269 }
2270 case KVM_MMU_OP_FLUSH_TLB: {
2271 struct kvm_mmu_op_flush_tlb *ftlb;
2272
2273 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
2274 if (!ftlb)
2275 return 0;
2276 return kvm_pv_mmu_flush_tlb(vcpu);
2277 }
2278 case KVM_MMU_OP_RELEASE_PT: {
2279 struct kvm_mmu_op_release_pt *rpt;
2280
2281 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
2282 if (!rpt)
2283 return 0;
2284 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
2285 }
2286 default: return 0;
2287 }
2288}
2289
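/*
 * Copy in up to one buffer's worth of guest operations and apply
 * them in order, stopping at the first malformed or failing op;
 * *ret reports how many bytes were actually consumed.
 */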
2290int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
2291 gpa_t addr, unsigned long *ret)
2292{
2293 int r;
2294 struct kvm_pv_mmu_op_buffer buffer;
2295
2f333bcb
MT
2296 buffer.ptr = buffer.buf;
2297 buffer.len = min_t(unsigned long, bytes, sizeof buffer.buf);
2298 buffer.processed = 0;
2299
2300 r = kvm_read_guest(vcpu->kvm, addr, buffer.buf, buffer.len);
2301 if (r)
2302 goto out;
2303
2304 while (buffer.len) {
2305 r = kvm_pv_mmu_op_one(vcpu, &buffer);
2306 if (r < 0)
2307 goto out;
2308 if (r == 0)
2309 break;
2310 }
2311
2312 r = 1;
2313out:
2314 *ret = buffer.processed;
2f333bcb
MT
2315 return r;
2316}
2317
37a7d8b0
AK
2318#ifdef AUDIT
2319
2320static const char *audit_msg;
2321
2322static gva_t canonicalize(gva_t gva)
2323{
2324#ifdef CONFIG_X86_64
2325 gva = (long long)(gva << 16) >> 16;
2326#endif
2327 return gva;
2328}
2329
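/*
 * Recursively walk a shadow table and cross-check every present
 * leaf spte against the guest translation for the same va, reporting
 * sptes whose host frame disagrees with gva_to_gpa().
 */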
2330static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
2331 gva_t va, int level)
2332{
2333 u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
2334 int i;
2335 gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
2336
2337 for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
2338 u64 ent = pt[i];
2339
c7addb90 2340 if (ent == shadow_trap_nonpresent_pte)
37a7d8b0
AK
2341 continue;
2342
2343 va = canonicalize(va);
c7addb90
AK
2344 if (level > 1) {
2345 if (ent == shadow_notrap_nonpresent_pte)
2346 printk(KERN_ERR "audit: (%s) nontrapping pte"
2347 " in nonleaf level: levels %d gva %lx"
2348 " level %d pte %llx\n", audit_msg,
ad312c7c 2349 vcpu->arch.mmu.root_level, va, level, ent);
c7addb90 2350
37a7d8b0 2351 audit_mappings_page(vcpu, ent, va, level - 1);
c7addb90 2352 } else {
ad312c7c 2353 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
35149e21 2354 pfn_t pfn = gpa_to_pfn(vcpu, gpa);
35149e21 2354 hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
37a7d8b0 2355
c7addb90 2356 if (is_shadow_present_pte(ent)
37a7d8b0 2357 && (ent & PT64_BASE_ADDR_MASK) != hpa)
c7addb90
AK
2358 printk(KERN_ERR "xx audit error: (%s) levels %d"
2359 " gva %lx gpa %llx hpa %llx ent %llx %d\n",
ad312c7c 2360 audit_msg, vcpu->arch.mmu.root_level,
d77c26fc
MD
2361 va, gpa, hpa, ent,
2362 is_shadow_present_pte(ent));
c7addb90
AK
2363 else if (ent == shadow_notrap_nonpresent_pte
2364 && !is_error_hpa(hpa))
2365 printk(KERN_ERR "audit: (%s) notrap shadow,"
2366 " valid guest gva %lx\n", audit_msg, va);
35149e21 2367 kvm_release_pfn_clean(pfn);
c7addb90 2368
37a7d8b0
AK
2369 }
2370 }
2371}
2372
2373static void audit_mappings(struct kvm_vcpu *vcpu)
2374{
1ea252af 2375 unsigned i;
37a7d8b0 2376
ad312c7c
ZX
2377 if (vcpu->arch.mmu.root_level == 4)
2378 audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
37a7d8b0
AK
2379 else
2380 for (i = 0; i < 4; ++i)
ad312c7c 2381 if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
37a7d8b0 2382 audit_mappings_page(vcpu,
ad312c7c 2383 vcpu->arch.mmu.pae_root[i],
37a7d8b0
AK
2384 i << 30,
2385 2);
2386}
2387
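/*
 * An rmap slot encodes either a single spte pointer (low bit clear)
 * or a pointer to a chain of kvm_rmap_desc blocks (low bit set);
 * count_rmaps() has to walk both representations.
 */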
2388static int count_rmaps(struct kvm_vcpu *vcpu)
2389{
2390 int nmaps = 0;
2391 int i, j, k;
2392
2393 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
2394 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
2395 struct kvm_rmap_desc *d;
2396
2397 for (j = 0; j < m->npages; ++j) {
290fc38d 2398 unsigned long *rmapp = &m->rmap[j];
37a7d8b0 2399
290fc38d 2400 if (!*rmapp)
37a7d8b0 2401 continue;
290fc38d 2402 if (!(*rmapp & 1)) {
37a7d8b0
AK
2403 ++nmaps;
2404 continue;
2405 }
290fc38d 2406 d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
37a7d8b0
AK
2407 while (d) {
2408 for (k = 0; k < RMAP_EXT; ++k)
2409 if (d->shadow_ptes[k])
2410 ++nmaps;
2411 else
2412 break;
2413 d = d->more;
2414 }
2415 }
2416 }
2417 return nmaps;
2418}
2419
2420static int count_writable_mappings(struct kvm_vcpu *vcpu)
2421{
2422 int nmaps = 0;
4db35314 2423 struct kvm_mmu_page *sp;
37a7d8b0
AK
2424 int i;
2425
f05e70ac 2426 list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
4db35314 2427 u64 *pt = sp->spt;
37a7d8b0 2428
4db35314 2429 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
37a7d8b0
AK
2430 continue;
2431
2432 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
2433 u64 ent = pt[i];
2434
2435 if (!(ent & PT_PRESENT_MASK))
2436 continue;
2437 if (!(ent & PT_WRITABLE_MASK))
2438 continue;
2439 ++nmaps;
2440 }
2441 }
2442 return nmaps;
2443}
2444
2445static void audit_rmap(struct kvm_vcpu *vcpu)
2446{
2447 int n_rmap = count_rmaps(vcpu);
2448 int n_actual = count_writable_mappings(vcpu);
2449
2450 if (n_rmap != n_actual)
2451 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
b8688d51 2452 __func__, audit_msg, n_rmap, n_actual);
37a7d8b0
AK
2453}
2454
2455static void audit_write_protection(struct kvm_vcpu *vcpu)
2456{
4db35314 2457 struct kvm_mmu_page *sp;
290fc38d
IE
2458 struct kvm_memory_slot *slot;
2459 unsigned long *rmapp;
2460 gfn_t gfn;
37a7d8b0 2461
f05e70ac 2462 list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
4db35314 2463 if (sp->role.metaphysical)
37a7d8b0
AK
2464 continue;
2465
4db35314
AK
2466 slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
2467 gfn = unalias_gfn(vcpu->kvm, sp->gfn);
290fc38d
IE
2468 rmapp = &slot->rmap[gfn - slot->base_gfn];
2469 if (*rmapp)
37a7d8b0
AK
2470 printk(KERN_ERR "%s: (%s) shadow page has writable"
2471 " mappings: gfn %lx role %x\n",
b8688d51 2472 __func__, audit_msg, sp->gfn,
4db35314 2473 sp->role.word);
37a7d8b0
AK
2474 }
2475}
2476
2477static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
2478{
2479 int olddbg = dbg;
2480
2481 dbg = 0;
2482 audit_msg = msg;
2483 audit_rmap(vcpu);
2484 audit_write_protection(vcpu);
2485 audit_mappings(vcpu);
2486 dbg = olddbg;
2487}
2488
2489#endif