/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */

#if PTTYPE == 64
        #define pt_element_t u64
        #define guest_walker guest_walker64
        #define FNAME(name) paging##64_##name
        #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
        #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
        #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
        #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
        #define PT_LEVEL_BITS PT64_LEVEL_BITS
        #ifdef CONFIG_X86_64
        #define PT_MAX_FULL_LEVELS 4
        #define CMPXCHG cmpxchg
        #else
        #define CMPXCHG cmpxchg64
        #define PT_MAX_FULL_LEVELS 2
        #endif
#elif PTTYPE == 32
        #define pt_element_t u32
        #define guest_walker guest_walker32
        #define FNAME(name) paging##32_##name
        #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
        #define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
        #define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
        #define PT_INDEX(addr, level) PT32_INDEX(addr, level)
        #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
        #define PT_LEVEL_BITS PT32_LEVEL_BITS
        #define PT_MAX_FULL_LEVELS 2
        #define CMPXCHG cmpxchg
#else
        #error Invalid PTTYPE value
#endif
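
/*
 * For illustration: mmu.c sets PTTYPE and includes this template once per
 * guest pte width, roughly
 *
 *      #define PTTYPE 64
 *      #include "paging_tmpl.h"
 *      #undef PTTYPE
 *
 *      #define PTTYPE 32
 *      #include "paging_tmpl.h"
 *      #undef PTTYPE
 *
 * so that FNAME(page_fault) expands to paging64_page_fault in the first
 * pass and paging32_page_fault in the second.
 */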

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
        int level;
        gfn_t table_gfn[PT_MAX_FULL_LEVELS];
        pt_element_t ptes[PT_MAX_FULL_LEVELS];
        gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
        unsigned pt_access;
        unsigned pte_access;
        gfn_t gfn;
        u32 error_code;
};

static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
        return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}
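
/*
 * Worked example: with 4KB pages, a 64-bit gpte of 0x00000000abcd3067
 * (frame 0xabcd3 plus the P/RW/US/A/D flag bits 0x067) gives
 * gpte_to_gfn_lvl(gpte, PT_PAGE_TABLE_LEVEL) == 0xabcd3: the level-1
 * address mask keeps the frame bits and the shift drops the low 12.
 */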

static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
                                gfn_t table_gfn, unsigned index,
                                pt_element_t orig_pte, pt_element_t new_pte)
{
        pt_element_t ret;
        pt_element_t *table;
        struct page *page;

        page = gfn_to_page(kvm, table_gfn);

        table = kmap_atomic(page, KM_USER0);
        ret = CMPXCHG(&table[index], orig_pte, new_pte);
        kunmap_atomic(table, KM_USER0);

        kvm_release_page_dirty(page);

        return (ret != orig_pte);
}
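
/*
 * The cmpxchg lets the accessed/dirty updates in walk_addr race safely
 * with the guest: if the pte changed between the walker's read and the
 * update, CMPXCHG leaves the table alone and this helper returns true,
 * and the caller restarts the walk (goto walk).
 */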

static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
        unsigned access;

        access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
        if (is_nx(vcpu))
                access &= ~(gpte >> PT64_NX_SHIFT);
#endif
        return access;
}
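
/*
 * Worked example: for a gpte with RW=1, US=1 and NX=1, access starts as
 * PT_WRITABLE_MASK | PT_USER_MASK | ACC_EXEC_MASK.  With NX enabled,
 * gpte >> PT64_NX_SHIFT brings the NX bit (bit 63) down onto the exec
 * permission bit, so the mask-out clears ACC_EXEC_MASK and the result is
 * read/write/user but not executable.
 */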

/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
                            struct kvm_vcpu *vcpu, gva_t addr,
                            int write_fault, int user_fault, int fetch_fault)
{
        pt_element_t pte;
        gfn_t table_gfn;
        unsigned index, pt_access, pte_access;
        gpa_t pte_gpa;
        int rsvd_fault = 0;

        trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
                                     fetch_fault);
walk:
        walker->level = vcpu->arch.mmu.root_level;
        pte = vcpu->arch.cr3;
#if PTTYPE == 64
        if (!is_long_mode(vcpu)) {
                pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3);
                trace_kvm_mmu_paging_element(pte, walker->level);
                if (!is_present_gpte(pte))
                        goto not_present;
                --walker->level;
        }
#endif
        ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
               (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

        pt_access = ACC_ALL;

        for (;;) {
                index = PT_INDEX(addr, walker->level);

                table_gfn = gpte_to_gfn(pte);
                pte_gpa = gfn_to_gpa(table_gfn);
                pte_gpa += index * sizeof(pt_element_t);
                walker->table_gfn[walker->level - 1] = table_gfn;
                walker->pte_gpa[walker->level - 1] = pte_gpa;

                if (kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte)))
                        goto not_present;

                trace_kvm_mmu_paging_element(pte, walker->level);

                if (!is_present_gpte(pte))
                        goto not_present;

                rsvd_fault = is_rsvd_bits_set(vcpu, pte, walker->level);
                if (rsvd_fault)
                        goto access_error;

                if (write_fault && !is_writable_pte(pte))
                        if (user_fault || is_write_protection(vcpu))
                                goto access_error;

                if (user_fault && !(pte & PT_USER_MASK))
                        goto access_error;

#if PTTYPE == 64
                if (fetch_fault && (pte & PT64_NX_MASK))
                        goto access_error;
#endif

                if (!(pte & PT_ACCESSED_MASK)) {
                        trace_kvm_mmu_set_accessed_bit(table_gfn, index,
                                                       sizeof(pte));
                        mark_page_dirty(vcpu->kvm, table_gfn);
                        if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
                            index, pte, pte|PT_ACCESSED_MASK))
                                goto walk;
                        pte |= PT_ACCESSED_MASK;
                }

                pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

                walker->ptes[walker->level - 1] = pte;

                if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
                    ((walker->level == PT_DIRECTORY_LEVEL) &&
                     is_large_pte(pte) &&
                     (PTTYPE == 64 || is_pse(vcpu))) ||
                    ((walker->level == PT_PDPE_LEVEL) &&
                     is_large_pte(pte) &&
                     is_long_mode(vcpu))) {
                        int lvl = walker->level;

                        walker->gfn = gpte_to_gfn_lvl(pte, lvl);
                        walker->gfn += (addr & PT_LVL_OFFSET_MASK(lvl))
                                        >> PAGE_SHIFT;

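                        /*
                         * With PSE36, a 32-bit pde stores physical address
                         * bits above 31 in otherwise-unused pde bits;
                         * pse36_gfn_delta() folds those high bits back into
                         * the gfn.
                         */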
                        if (PTTYPE == 32 &&
                            walker->level == PT_DIRECTORY_LEVEL &&
                            is_cpuid_PSE36())
                                walker->gfn += pse36_gfn_delta(pte);

                        break;
                }

                pt_access = pte_access;
                --walker->level;
        }

        if (write_fault && !is_dirty_gpte(pte)) {
                bool ret;

                trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
                mark_page_dirty(vcpu->kvm, table_gfn);
                ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
                                          pte|PT_DIRTY_MASK);
                if (ret)
                        goto walk;
                pte |= PT_DIRTY_MASK;
                walker->ptes[walker->level - 1] = pte;
        }

        walker->pt_access = pt_access;
        walker->pte_access = pte_access;
        pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
                 __func__, (u64)pte, pt_access, pte_access);
        return 1;

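/*
 * The error code assembled below follows the x86 page-fault error code
 * layout: bit 0 means the fault hit a present mapping (PFERR_PRESENT_MASK),
 * bit 1 a write (PFERR_WRITE_MASK), bit 2 a user-mode access
 * (PFERR_USER_MASK), bit 3 a reserved-bit violation (PFERR_RSVD_MASK) and
 * bit 4 an instruction fetch (PFERR_FETCH_MASK).
 */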
not_present:
        walker->error_code = 0;
        goto err;

access_error:
        walker->error_code = PFERR_PRESENT_MASK;

err:
        if (write_fault)
                walker->error_code |= PFERR_WRITE_MASK;
        if (user_fault)
                walker->error_code |= PFERR_USER_MASK;
        if (fetch_fault)
                walker->error_code |= PFERR_FETCH_MASK;
        if (rsvd_fault)
                walker->error_code |= PFERR_RSVD_MASK;
        trace_kvm_mmu_walker_error(walker->error_code);
        return 0;
}

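/*
 * Reached via the kvm_mmu_pte_write() path when the guest writes a pte
 * that is shadowed: rather than waiting for a fault, update the
 * corresponding spte from the new guest pte value.
 */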
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
                              u64 *spte, const void *pte)
{
        pt_element_t gpte;
        unsigned pte_access;
        pfn_t pfn;
        u64 new_spte;

        gpte = *(const pt_element_t *)pte;
        if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
                if (!is_present_gpte(gpte)) {
                        if (page->unsync)
                                new_spte = shadow_trap_nonpresent_pte;
                        else
                                new_spte = shadow_notrap_nonpresent_pte;
                        __set_spte(spte, new_spte);
                }
                return;
        }
        pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
        pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
        if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
                return;
        pfn = vcpu->arch.update_pte.pfn;
        if (is_error_pfn(pfn))
                return;
        if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
                return;
        kvm_get_pfn(pfn);
        /*
         * we call mmu_set_spte() with reset_host_protection = true because
         * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
         */
        mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
                     gpte & PT_DIRTY_MASK, NULL, PT_PAGE_TABLE_LEVEL,
                     gpte_to_gfn(gpte), pfn, true, true);
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                         struct guest_walker *gw,
                         int user_fault, int write_fault, int hlevel,
                         int *ptwrite, pfn_t pfn)
{
        unsigned access = gw->pt_access;
        struct kvm_mmu_page *shadow_page;
        u64 spte, *sptep = NULL;
        int direct;
        gfn_t table_gfn;
        int r;
        int level;
        pt_element_t curr_pte;
        struct kvm_shadow_walk_iterator iterator;

        if (!is_present_gpte(gw->ptes[gw->level - 1]))
                return NULL;

        for_each_shadow_entry(vcpu, addr, iterator) {
                level = iterator.level;
                sptep = iterator.sptep;
                if (iterator.level == hlevel) {
                        mmu_set_spte(vcpu, sptep, access,
                                     gw->pte_access & access,
                                     user_fault, write_fault,
                                     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
                                     ptwrite, level,
                                     gw->gfn, pfn, false, true);
                        break;
                }

                if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
                        continue;

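                /*
                 * A huge spte cannot be reused now that a page table is
                 * needed at this level: zap it and flush remote TLBs so no
                 * vcpu keeps using the stale large mapping.
                 */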
                if (is_large_pte(*sptep)) {
                        rmap_remove(vcpu->kvm, sptep);
                        __set_spte(sptep, shadow_trap_nonpresent_pte);
                        kvm_flush_remote_tlbs(vcpu->kvm);
                }

                if (level <= gw->level) {
                        int delta = level - gw->level + 1;
                        direct = 1;
                        if (!is_dirty_gpte(gw->ptes[level - delta]))
                                access &= ~ACC_WRITE_MASK;
                        table_gfn = gpte_to_gfn(gw->ptes[level - delta]);
                        /* advance table_gfn when emulating 1gb pages with 4k */
                        if (delta == 0)
                                table_gfn += PT_INDEX(addr, level);
                        access &= gw->pte_access;
                } else {
                        direct = 0;
                        table_gfn = gw->table_gfn[level - 2];
                }
                shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
                                               direct, access, sptep);
                if (!direct) {
                        r = kvm_read_guest_atomic(vcpu->kvm,
                                                  gw->pte_gpa[level - 2],
                                                  &curr_pte, sizeof(curr_pte));
                        if (r || curr_pte != gw->ptes[level - 2]) {
                                kvm_mmu_put_page(shadow_page, sptep);
                                kvm_release_pfn_clean(pfn);
                                sptep = NULL;
                                break;
                        }
                }

                spte = __pa(shadow_page->spt)
                        | PT_PRESENT_MASK | PT_ACCESSED_MASK
                        | PT_WRITABLE_MASK | PT_USER_MASK;
                *sptep = spte;
        }

        return sptep;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *           - there is no shadow pte for the guest pte
 *           - write access through a shadow pte marked read only so that we
 *             can set the dirty bit
 *           - write access to a shadow pte marked read only so we can update
 *             the page dirty bitmap, when userspace requests it
 *           - mmio access; in this case we will never install a present
 *             shadow pte
 *           - normal guest page fault due to the guest pte marked not
 *             present, not writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
                             u32 error_code)
{
        int write_fault = error_code & PFERR_WRITE_MASK;
        int user_fault = error_code & PFERR_USER_MASK;
        int fetch_fault = error_code & PFERR_FETCH_MASK;
        struct guest_walker walker;
        u64 *sptep;
        int write_pt = 0;
        int r;
        pfn_t pfn;
        int level = PT_PAGE_TABLE_LEVEL;
        unsigned long mmu_seq;

        pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
        kvm_mmu_audit(vcpu, "pre page fault");

        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;

        /*
         * Look up the guest pte for the faulting address.
         */
        r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
                             fetch_fault);

        /*
         * The page is not mapped by the guest.  Let the guest handle it.
         */
        if (!r) {
                pgprintk("%s: guest page fault\n", __func__);
                inject_page_fault(vcpu, addr, walker.error_code);
                vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
                return 0;
        }

        if (walker.level >= PT_DIRECTORY_LEVEL) {
                level = min(walker.level, mapping_level(vcpu, walker.gfn));
                walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
        }

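        /*
         * Snapshot the mmu notifier sequence count before the (possibly
         * sleeping) gfn_to_pfn() call; if an invalidation runs in between,
         * mmu_notifier_retry() below sees the count change under mmu_lock
         * and we bail out instead of installing a stale pfn.
         */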
        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();
        pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);

        /* mmio */
        if (is_error_pfn(pfn))
                return kvm_handle_bad_page(vcpu->kvm, walker.gfn, pfn);

        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu, mmu_seq))
                goto out_unlock;
        kvm_mmu_free_some_pages(vcpu);
        sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
                             level, &write_pt, pfn);
        pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
                 sptep, *sptep, write_pt);

        if (!write_pt)
                vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

        ++vcpu->stat.pf_fixed;
        kvm_mmu_audit(vcpu, "post page fault (fixed)");
        spin_unlock(&vcpu->kvm->mmu_lock);

        return write_pt;

out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        return 0;
}

static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
        struct kvm_shadow_walk_iterator iterator;
        gpa_t pte_gpa = -1;
        int level;
        u64 *sptep;
        int need_flush = 0;

        spin_lock(&vcpu->kvm->mmu_lock);

        for_each_shadow_entry(vcpu, gva, iterator) {
                level = iterator.level;
                sptep = iterator.sptep;

                if (is_last_spte(*sptep, level)) {
                        struct kvm_mmu_page *sp = page_header(__pa(sptep));
                        int offset, shift;

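                        /*
                         * Recover the guest pte's gpa from the shadow walk.
                         * With PTTYPE == 32, guest ptes are half the size of
                         * shadow ptes, so one guest page table is split
                         * across two shadow pages and role.quadrant selects
                         * the half: shift evaluates to PAGE_SHIFT - level,
                         * e.g. 11 at the last level, so quadrant 1 adds a
                         * 2KB offset into the guest page.  (For PTTYPE == 64
                         * the correction vanishes and quadrant is 0.)
                         */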
                        shift = PAGE_SHIFT -
                                  (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
                        offset = sp->role.quadrant << shift;

                        pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
                        pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

                        if (is_shadow_present_pte(*sptep)) {
                                rmap_remove(vcpu->kvm, sptep);
                                if (is_large_pte(*sptep))
                                        --vcpu->kvm->stat.lpages;
                                need_flush = 1;
                        }
                        __set_spte(sptep, shadow_trap_nonpresent_pte);
                        break;
                }

                if (!is_shadow_present_pte(*sptep))
                        break;
        }

        if (need_flush)
                kvm_flush_remote_tlbs(vcpu->kvm);

        atomic_inc(&vcpu->kvm->arch.invlpg_counter);

        spin_unlock(&vcpu->kvm->mmu_lock);

        if (pte_gpa == -1)
                return;

        if (mmu_topup_memory_caches(vcpu))
                return;
        kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
}

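/*
 * Software-walk a guest virtual address to a guest physical address.  The
 * "access" argument carries PFERR_* flags describing the access to check;
 * for example, a caller checking a write would pass PFERR_WRITE_MASK.  On
 * failure, UNMAPPED_GVA is returned and *error holds the page-fault error
 * code to inject.
 */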
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
                               u32 *error)
{
        struct guest_walker walker;
        gpa_t gpa = UNMAPPED_GVA;
        int r;

        r = FNAME(walk_addr)(&walker, vcpu, vaddr,
                             !!(access & PFERR_WRITE_MASK),
                             !!(access & PFERR_USER_MASK),
                             !!(access & PFERR_FETCH_MASK));

        if (r) {
                gpa = gfn_to_gpa(walker.gfn);
                gpa |= vaddr & ~PAGE_MASK;
        } else if (error)
                *error = walker.error_code;

        return gpa;
}

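/*
 * Initialize a freshly shadowed page table with trap/notrap markers.  The
 * idea, roughly: shadow_trap_nonpresent_pte means "guest pte unknown, fault
 * into KVM and walk", while shadow_notrap_nonpresent_pte marks entries the
 * guest itself maps as not present, so the resulting fault can be reflected
 * to the guest without walking its page tables.
 */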
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
                                 struct kvm_mmu_page *sp)
{
        int i, j, offset, r;
        pt_element_t pt[256 / sizeof(pt_element_t)];
        gpa_t pte_gpa;

        if (sp->role.direct
            || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
                nonpaging_prefetch_page(vcpu, sp);
                return;
        }

        pte_gpa = gfn_to_gpa(sp->gfn);
        if (PTTYPE == 32) {
                offset = sp->role.quadrant << PT64_LEVEL_BITS;
                pte_gpa += offset * sizeof(pt_element_t);
        }

        for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
                r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
                pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
                for (j = 0; j < ARRAY_SIZE(pt); ++j)
                        if (r || is_present_gpte(pt[j]))
                                sp->spt[i+j] = shadow_trap_nonpresent_pte;
                        else
                                sp->spt[i+j] = shadow_notrap_nonpresent_pte;
        }
}

/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 * - Alias changes zap the entire shadow cache.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
        int i, offset, nr_present;
        bool reset_host_protection;
        gpa_t first_pte_gpa;

        offset = nr_present = 0;

        if (PTTYPE == 32)
                offset = sp->role.quadrant << PT64_LEVEL_BITS;

        first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);

        for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
                unsigned pte_access;
                pt_element_t gpte;
                gpa_t pte_gpa;
                gfn_t gfn = sp->gfns[i];

                if (!is_shadow_present_pte(sp->spt[i]))
                        continue;

                pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

                if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
                                          sizeof(pt_element_t)))
                        return -EINVAL;

                if (gpte_to_gfn(gpte) != gfn || !is_present_gpte(gpte) ||
                    !(gpte & PT_ACCESSED_MASK)) {
                        u64 nonpresent;

                        rmap_remove(vcpu->kvm, &sp->spt[i]);
                        if (is_present_gpte(gpte))
                                nonpresent = shadow_trap_nonpresent_pte;
                        else
                                nonpresent = shadow_notrap_nonpresent_pte;
                        __set_spte(&sp->spt[i], nonpresent);
                        continue;
                }

                nr_present++;
                pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
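                /*
                 * SPTE_HOST_WRITEABLE records whether the host page was
                 * writable when the spte was created; if it was not
                 * (presumably a KSM or otherwise write-protected host page),
                 * keep write access masked off rather than restoring it.
                 */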
                if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) {
                        pte_access &= ~ACC_WRITE_MASK;
                        reset_host_protection = 0;
                } else {
                        reset_host_protection = 1;
                }
                set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
                         is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
                         spte_to_pfn(sp->spt[i]), true, false,
                         reset_host_protection);
        }

        return !nr_present;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG