/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *	Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */

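/*
 * For reference, mmu.c instantiates both variants by setting PTTYPE and
 * including this file twice, roughly like this (a sketch of the include
 * pattern, not copied verbatim from mmu.c):
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */
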
#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define shadow_walker shadow_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define shadow_walker shadow_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif

#define gpte_to_gfn FNAME(gpte_to_gfn)
#define gpte_to_gfn_pde FNAME(gpte_to_gfn_pde)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;				/* current walk level; leaf level on success */
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];	/* gfn of the guest page table at each level */
	pt_element_t ptes[PT_MAX_FULL_LEVELS];	/* guest pte read at each level */
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];	/* guest physical address of each pte */
	unsigned pt_access;			/* access bits granted by the upper levels */
	unsigned pte_access;			/* access bits of the leaf pte, masked by pt_access */
	gfn_t gfn;				/* translated guest frame number */
	u32 error_code;				/* fault error code to inject if the walk fails */
};

struct shadow_walker {
	struct kvm_shadow_walk walker;
	struct guest_walker *guest_walker;
	int user_fault;
	int write_fault;
	int largepage;
	int *ptwrite;
	pfn_t pfn;
	u64 *sptep;
	gpa_t pte_gpa;
};

static gfn_t gpte_to_gfn(pt_element_t gpte)
{
	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t gpte_to_gfn_pde(pt_element_t gpte)
{
	return (gpte & PT_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

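/*
 * Atomically replace a guest pte in place (used below to set the accessed
 * and dirty bits).  Returns true if the pte was modified behind our back,
 * i.e. the cmpxchg did not find orig_pte, in which case the caller restarts
 * the guest page table walk.
 */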
static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
			 gfn_t table_gfn, unsigned index,
			 pt_element_t orig_pte, pt_element_t new_pte)
{
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	page = gfn_to_page(kvm, table_gfn);

	table = kmap_atomic(page, KM_USER0);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table, KM_USER0);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}

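/*
 * Derive the ACC_* permission mask for a guest pte: write and user
 * permission come straight from the pte's W and U bits, and execute is
 * allowed unless the pte's NX bit is set (64-bit ptes with NX enabled only).
 */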
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
	if (is_nx(vcpu))
		access &= ~(gpte >> PT64_NX_SHIFT);
#endif
	return access;
}

/*
 * Fetch a guest pte for a guest virtual address.  Walks the guest page
 * tables, applying the access checks implied by write_fault, user_fault and
 * fetch_fault.  Returns 1 on success; returns 0 and fills in
 * walker->error_code if the walk hits a not-present pte or fails an access
 * check.
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index, pt_access, pte_access;
	gpa_t pte_gpa;

	pgprintk("%s: addr %lx\n", __func__, addr);
walk:
	walker->level = vcpu->arch.mmu.root_level;
	pte = vcpu->arch.cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
		if (!is_present_pte(pte))
			goto not_present;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	pt_access = ACC_ALL;

	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		pte_gpa = gfn_to_gpa(table_gfn);
		pte_gpa += index * sizeof(pt_element_t);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;
		pgprintk("%s: table_gfn[%d] %lx\n", __func__,
			 walker->level - 1, table_gfn);

		kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));

		if (!is_present_pte(pte))
			goto not_present;

		if (write_fault && !is_writeble_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(pte & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
			goto access_error;
#endif

		if (!(pte & PT_ACCESSED_MASK)) {
			mark_page_dirty(vcpu->kvm, table_gfn);
			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
			    index, pte, pte|PT_ACCESSED_MASK))
				goto walk;
			pte |= PT_ACCESSED_MASK;
		}

		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = gpte_to_gfn(pte);
			break;
		}

		if (walker->level == PT_DIRECTORY_LEVEL
		    && (pte & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = gpte_to_gfn_pde(pte);
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			if (PTTYPE == 32 && is_cpuid_PSE36())
				walker->gfn += pse36_gfn_delta(pte);
			break;
		}

		pt_access = pte_access;
		--walker->level;
	}

	if (write_fault && !is_dirty_pte(pte)) {
		bool ret;

		mark_page_dirty(vcpu->kvm, table_gfn);
		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
			    pte|PT_DIRTY_MASK);
		if (ret)
			goto walk;
		pte |= PT_DIRTY_MASK;
		kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte), 0);
		walker->ptes[walker->level - 1] = pte;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pt_access, pte_access);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	return 0;
}

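/*
 * Speculatively map a guest pte that the guest has just modified (invoked
 * from the pte-write path).  Bails out unless the gpte is present and
 * accessed, matches the gfn cached in vcpu->arch.update_pte, and no mmu
 * notifier invalidation raced with the cached pfn.
 */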
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	pfn_t pfn;
	int largepage = vcpu->arch.update_pte.largepage;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!is_present_pte(gpte))
			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
		return;
	pfn = vcpu->arch.update_pte.pfn;
	if (is_error_pfn(pfn))
		return;
	if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
		return;
	kvm_get_pfn(pfn);
	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
		     gpte & PT_DIRTY_MASK, NULL, largepage,
		     gpte & PT_GLOBAL_MASK, gpte_to_gfn(gpte),
		     pfn, true);
}

/*
 * Fetch a shadow pte for a guest virtual address, allocating any missing
 * intermediate shadow pages along the way, and return a pointer to the
 * leaf spte.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *gw,
			 int user_fault, int write_fault, int largepage,
			 int *ptwrite, pfn_t pfn)
{
	unsigned access = gw->pt_access;
	struct kvm_mmu_page *shadow_page;
	u64 spte, *sptep;
	int metaphysical;
	gfn_t table_gfn;
	int r;
	int level;
	pt_element_t curr_pte;
	struct kvm_shadow_walk_iterator iterator;

	if (!is_present_pte(gw->ptes[gw->level - 1]))
		return NULL;

	for_each_shadow_entry(vcpu, addr, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;
		if (level == PT_PAGE_TABLE_LEVEL
		    || (largepage && level == PT_DIRECTORY_LEVEL)) {
			/* Reached the leaf level: install the final spte. */
			mmu_set_spte(vcpu, sptep, access,
				     gw->pte_access & access,
				     user_fault, write_fault,
				     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
				     ptwrite, largepage,
				     gw->ptes[gw->level-1] & PT_GLOBAL_MASK,
				     gw->gfn, pfn, false);
			break;
		}

		/* An intermediate shadow page is already in place: descend. */
		if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
			continue;

		if (is_large_pte(*sptep)) {
			set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
			kvm_flush_remote_tlbs(vcpu->kvm);
			rmap_remove(vcpu->kvm, sptep);
		}

		if (level == PT_DIRECTORY_LEVEL
		    && gw->level == PT_DIRECTORY_LEVEL) {
			/*
			 * The guest maps this range with a large page but we
			 * shadow it with 4K ptes: use a metaphysical (direct)
			 * page that is not backed by a guest page table.
			 */
			metaphysical = 1;
			if (!is_dirty_pte(gw->ptes[level - 1]))
				access &= ~ACC_WRITE_MASK;
			table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
		} else {
			metaphysical = 0;
			table_gfn = gw->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       metaphysical, access, sptep);
		if (!metaphysical) {
			r = kvm_read_guest_atomic(vcpu->kvm,
						  gw->pte_gpa[level - 2],
						  &curr_pte, sizeof(curr_pte));
			if (r || curr_pte != gw->ptes[level - 2]) {
				/* The guest pte changed under us: give up. */
				kvm_mmu_put_page(shadow_page, sptep);
				kvm_release_pfn_clean(pfn);
				sptep = NULL;
				break;
			}
		}

		spte = __pa(shadow_page->spt)
			| PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*sptep = spte;
	}

	return sptep;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			       u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int write_pt = 0;
	int r;
	pfn_t pfn;
	int largepage = 0;
	unsigned long mmu_seq;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the shadow pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	if (walker.level == PT_DIRECTORY_LEVEL) {
		gfn_t large_gfn;
		large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
		if (is_largepage_backed(vcpu, large_gfn)) {
			walker.gfn = large_gfn;
			largepage = 1;
		}
	}
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);

	/* mmio */
	if (is_error_pfn(pfn)) {
		pgprintk("gfn %lx is mmio\n", walker.gfn);
		kvm_release_pfn_clean(pfn);
		return 1;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  largepage, &write_pt, pfn);

	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
		 shadow_pte, *shadow_pte, write_pt);

	if (!write_pt)
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");
	spin_unlock(&vcpu->kvm->mmu_lock);

	return write_pt;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw,
				      struct kvm_vcpu *vcpu, u64 addr,
				      u64 *sptep, int level)
{
	struct shadow_walker *sw =
		container_of(_sw, struct shadow_walker, walker);

	/* FIXME: properly handle invlpg on large guest pages */
	if (level == PT_PAGE_TABLE_LEVEL ||
	    ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
		struct kvm_mmu_page *sp = page_header(__pa(sptep));

		sw->pte_gpa = (sp->gfn << PAGE_SHIFT);
		sw->pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

		if (is_shadow_present_pte(*sptep)) {
			rmap_remove(vcpu->kvm, sptep);
			if (is_large_pte(*sptep))
				--vcpu->kvm->stat.lpages;
		}
		set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
		return 1;
	}
	if (!is_shadow_present_pte(*sptep))
		return 1;
	return 0;
}

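/*
 * Guest invlpg emulation: walk the shadow page table for gva and zap the
 * leaf spte (recording the location of its guest pte), then, if the guest
 * pte is still present and accessed, rebuild the mapping from it through
 * the pte write path.
 */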
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	pt_element_t gpte;
	struct shadow_walker walker = {
		.walker = { .entry = FNAME(shadow_invlpg_entry), },
		.pte_gpa = -1,
	};

	spin_lock(&vcpu->kvm->mmu_lock);
	walk_shadow(&walker.walker, vcpu, gva);
	spin_unlock(&vcpu->kvm->mmu_lock);
	if (walker.pte_gpa == -1)
		return;
	if (kvm_read_guest_atomic(vcpu->kvm, walker.pte_gpa, &gpte,
				  sizeof(pt_element_t)))
		return;
	if (is_present_pte(gpte) && (gpte & PT_ACCESSED_MASK)) {
		if (mmu_topup_memory_caches(vcpu))
			return;
		kvm_mmu_pte_write(vcpu, walker.pte_gpa, (const u8 *)&gpte,
				  sizeof(pt_element_t), 0);
	}
}

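/*
 * Translate a guest virtual address to a guest physical address by walking
 * the guest page tables (no access checks); returns UNMAPPED_GVA if the
 * address is not mapped by the guest.
 */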
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}

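/*
 * Pre-populate a shadow page with nonpresent sptes: entries whose guest
 * ptes are present get shadow_trap_nonpresent_pte (a fault on them must be
 * handled by building the mapping), the rest get
 * shadow_notrap_nonpresent_pte (the fault can be reflected straight to the
 * guest).
 */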
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i, j, offset, r;
	pt_element_t pt[256 / sizeof(pt_element_t)];
	gpa_t pte_gpa;

	if (sp->role.metaphysical
	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	pte_gpa = gfn_to_gpa(sp->gfn);
	if (PTTYPE == 32) {
		offset = sp->role.quadrant << PT64_LEVEL_BITS;
		pte_gpa += offset * sizeof(pt_element_t);
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
		pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
		for (j = 0; j < ARRAY_SIZE(pt); ++j)
			if (r || is_present_pte(pt[j]))
				sp->spt[i+j] = shadow_trap_nonpresent_pte;
			else
				sp->spt[i+j] = shadow_notrap_nonpresent_pte;
	}
}

/*
 * Resynchronize the sptes in a shadow page with the current guest ptes,
 * using the gfns cached in sp->gfns.  Using the cached information is safe
 * because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 * - Alias changes zap the entire shadow cache.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, offset, nr_present;

	offset = nr_present = 0;

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn = sp->gfns[i];

		if (!is_shadow_present_pte(sp->spt[i]))
			continue;

		pte_gpa = gfn_to_gpa(sp->gfn);
		pte_gpa += (i+offset) * sizeof(pt_element_t);

		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))
			return -EINVAL;

		if (gpte_to_gfn(gpte) != gfn || !is_present_pte(gpte) ||
		    !(gpte & PT_ACCESSED_MASK)) {
			u64 nonpresent;

			rmap_remove(vcpu->kvm, &sp->spt[i]);
			if (is_present_pte(gpte))
				nonpresent = shadow_trap_nonpresent_pte;
			else
				nonpresent = shadow_notrap_nonpresent_pte;
			set_shadow_pte(&sp->spt[i], nonpresent);
			continue;
		}

		nr_present++;
		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
			 is_dirty_pte(gpte), 0, gpte & PT_GLOBAL_MASK, gfn,
			 spte_to_pfn(sp->spt[i]), true, false);
	}

	return !nr_present;
}

#undef pt_element_t
#undef guest_walker
#undef shadow_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_pde
#undef CMPXCHG