KVM: Fix cpuid feature misreporting
arch/x86/kvm/x86.c (net-next-2.6.git)

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>

#include <asm/uaccess.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS \
        (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
                          | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
                          | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS \
        (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE \
                          | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
                          | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
                          | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
/* EFER defaults:
 * - enable syscall per default because it is emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif
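/*
 * Editor's note (an illustration, not part of the original file): on
 * 64-bit hosts the default mask 0xfffffffffffffafe leaves exactly
 * EFER.SCE (bit 0), EFER.LME (bit 8) and EFER.LMA (bit 10) writable:
 *
 *   ~0xfffffffffffffafeULL == 0x0000000000000501ULL
 *                          == (1 << 0) | (1 << 8) | (1 << 10)
 *
 * On 32-bit hosts only EFER.SCE remains writable.
 */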

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
                                    struct kvm_cpuid_entry2 __user *entries);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                                              u32 function, u32 index);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "pf_fixed", VCPU_STAT(pf_fixed) },
        { "pf_guest", VCPU_STAT(pf_guest) },
        { "tlb_flush", VCPU_STAT(tlb_flush) },
        { "invlpg", VCPU_STAT(invlpg) },
        { "exits", VCPU_STAT(exits) },
        { "io_exits", VCPU_STAT(io_exits) },
        { "mmio_exits", VCPU_STAT(mmio_exits) },
        { "signal_exits", VCPU_STAT(signal_exits) },
        { "irq_window", VCPU_STAT(irq_window_exits) },
        { "nmi_window", VCPU_STAT(nmi_window_exits) },
        { "halt_exits", VCPU_STAT(halt_exits) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "hypercalls", VCPU_STAT(hypercalls) },
        { "request_irq", VCPU_STAT(request_irq_exits) },
        { "irq_exits", VCPU_STAT(irq_exits) },
        { "host_state_reload", VCPU_STAT(host_state_reload) },
        { "efer_reload", VCPU_STAT(efer_reload) },
        { "fpu_reload", VCPU_STAT(fpu_reload) },
        { "insn_emulation", VCPU_STAT(insn_emulation) },
        { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
        { "irq_injections", VCPU_STAT(irq_injections) },
        { "nmi_injections", VCPU_STAT(nmi_injections) },
        { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
        { "mmu_pte_write", VM_STAT(mmu_pte_write) },
        { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
        { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
        { "mmu_flooded", VM_STAT(mmu_flooded) },
        { "mmu_recycled", VM_STAT(mmu_recycled) },
        { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
        { "mmu_unsync", VM_STAT(mmu_unsync) },
        { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
        { "largepages", VM_STAT(lpages) },
        { NULL }
};

unsigned long segment_base(u16 selector)
{
        struct descriptor_table gdt;
        struct desc_struct *d;
        unsigned long table_base;
        unsigned long v;

        if (selector == 0)
                return 0;

        asm("sgdt %0" : "=m"(gdt));
        table_base = gdt.base;

        if (selector & 4) {           /* from ldt */
                u16 ldt_selector;

                asm("sldt %0" : "=g"(ldt_selector));
                table_base = segment_base(ldt_selector);
        }
        d = (struct desc_struct *)(table_base + (selector & ~7));
        v = d->base0 | ((unsigned long)d->base1 << 16) |
                ((unsigned long)d->base2 << 24);
#ifdef CONFIG_X86_64
        if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
                v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
        return v;
}
EXPORT_SYMBOL_GPL(segment_base);

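/*
 * Editor's illustration (not part of the original file): a descriptor's
 * base address is scattered over three fields.  For base0 = 0x8000,
 * base1 = 0x00 and base2 = 0x12, segment_base() above assembles
 *
 *   v = 0x8000 | (0x00UL << 16) | (0x12UL << 24) = 0x12008000
 *
 * and on x86-64 extends system descriptors (d->s == 0, LDT/TSS types
 * 2, 9 and 11) with a further base3 << 32.
 */
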
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
        if (irqchip_in_kernel(vcpu->kvm))
                return vcpu->arch.apic_base;
        else
                return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
        /* TODO: reserve bits check */
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_lapic_set_base(vcpu, data);
        else
                vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
        WARN_ON(vcpu->arch.exception.pending);
        vcpu->arch.exception.pending = true;
        vcpu->arch.exception.has_error_code = false;
        vcpu->arch.exception.nr = nr;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                           u32 error_code)
{
        ++vcpu->stat.pf_guest;

        if (vcpu->arch.exception.pending) {
                if (vcpu->arch.exception.nr == PF_VECTOR) {
                        printk(KERN_DEBUG "kvm: inject_page_fault:"
                               " double fault 0x%lx\n", addr);
                        vcpu->arch.exception.nr = DF_VECTOR;
                        vcpu->arch.exception.error_code = 0;
                } else if (vcpu->arch.exception.nr == DF_VECTOR) {
                        /* triple fault -> shutdown */
                        set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
                }
                return;
        }
        vcpu->arch.cr2 = addr;
        kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
        vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
        WARN_ON(vcpu->arch.exception.pending);
        vcpu->arch.exception.pending = true;
        vcpu->arch.exception.has_error_code = true;
        vcpu->arch.exception.nr = nr;
        vcpu->arch.exception.error_code = error_code;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

static void __queue_exception(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
                                     vcpu->arch.exception.has_error_code,
                                     vcpu->arch.exception.error_code);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
        unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
        int i;
        int ret;
        u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

        ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
                                  offset * sizeof(u64), sizeof(pdpte));
        if (ret < 0) {
                ret = 0;
                goto out;
        }
        for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
                if (is_present_pte(pdpte[i]) &&
                    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
                        ret = 0;
                        goto out;
                }
        }
        ret = 1;

        memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
out:

        return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

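/*
 * Editor's illustration (not part of the original file): in PAE mode CR3
 * points at a 32-byte-aligned table of four 8-byte PDPTEs, which may sit
 * anywhere within a page.  For cr3 = 0x12345020 the code above computes
 *
 *   pdpt_gfn = 0x12345020 >> PAGE_SHIFT    = 0x12345
 *   offset   = ((0x020 & 0xfff) >> 5) << 2 = 4
 *
 * so the four entries are read starting at byte offset
 * 4 * sizeof(u64) = 32 of that frame.
 */
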
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
        u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
        bool changed = true;
        int r;

        if (is_long_mode(vcpu) || !is_pae(vcpu))
                return false;

        r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
        if (r < 0)
                goto out;
        changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:

        return changed;
}

void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        if (cr0 & CR0_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
                       cr0, vcpu->arch.cr0);
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
                printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
                printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
                       "and a clear PE flag\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
                if ((vcpu->arch.shadow_efer & EFER_LME)) {
                        int cs_db, cs_l;

                        if (!is_pae(vcpu)) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while PAE is disabled\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                        kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
                        if (cs_l) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while CS.L == 1\n");
                                kvm_inject_gp(vcpu, 0);
                                return;

                        }
                } else
#endif
                if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
                        printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
                               "reserved bits\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }

        }

        kvm_x86_ops->set_cr0(vcpu, cr0);
        vcpu->arch.cr0 = cr0;

        kvm_mmu_reset_context(vcpu);
        return;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
        kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
        KVMTRACE_1D(LMSW, vcpu,
                    (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
                    handler);
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        unsigned long old_cr4 = vcpu->arch.cr4;
        unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

        if (cr4 & CR4_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (is_long_mode(vcpu)) {
                if (!(cr4 & X86_CR4_PAE)) {
                        printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
                               "in long mode\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
                   && ((cr4 ^ old_cr4) & pdptr_bits)
                   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
                printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (cr4 & X86_CR4_VMXE) {
                printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
        kvm_x86_ops->set_cr4(vcpu, cr4);
        vcpu->arch.cr4 = cr4;
        vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
        kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
                kvm_mmu_sync_roots(vcpu);
                kvm_mmu_flush_tlb(vcpu);
                return;
        }

        if (is_long_mode(vcpu)) {
                if (cr3 & CR3_L_MODE_RESERVED_BITS) {
                        printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        } else {
                if (is_pae(vcpu)) {
                        if (cr3 & CR3_PAE_RESERVED_BITS) {
                                printk(KERN_DEBUG
                                       "set_cr3: #GP, reserved bits\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                        if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
                                printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
                                       "reserved bits\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                }
                /*
                 * We don't check reserved bits in nonpae mode, because
                 * this isn't enforced, and VMware depends on this.
                 */
        }

        /*
         * Does the new cr3 value map to physical memory? (Note, we
         * catch an invalid cr3 even in real-mode, because it would
         * cause trouble later on when we turn on paging anyway.)
         *
         * A real CPU would silently accept an invalid cr3 and would
         * attempt to use it - with largely undefined (and often hard
         * to debug) behavior on the guest side.
         */
        if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
                kvm_inject_gp(vcpu, 0);
        else {
                vcpu->arch.cr3 = cr3;
                vcpu->arch.mmu.new_cr3(vcpu);
        }
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
        if (cr8 & CR8_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
                kvm_inject_gp(vcpu, 0);
                return;
        }
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_lapic_set_tpr(vcpu, cr8);
        else
                vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
        if (irqchip_in_kernel(vcpu->kvm))
                return kvm_lapic_get_cr8(vcpu);
        else
                return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static inline u32 bit(int bitno)
{
        return 1 << (bitno & 31);
}

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
        MSR_K6_STAR,
#ifdef CONFIG_X86_64
        MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
        MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
        MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
        MSR_IA32_MISC_ENABLE,
};

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        if (efer & efer_reserved_bits) {
                printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
                       efer);
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (is_paging(vcpu)
            && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
                printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (efer & EFER_FFXSR) {
                struct kvm_cpuid_entry2 *feat;

                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
                        printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        }

        if (efer & EFER_SVME) {
                struct kvm_cpuid_entry2 *feat;

                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
                        printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        }

        kvm_x86_ops->set_efer(vcpu, efer);

        efer &= ~EFER_LMA;
        efer |= vcpu->arch.shadow_efer & EFER_LMA;

        vcpu->arch.shadow_efer = efer;

        vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
        kvm_mmu_reset_context(vcpu);
}

void kvm_enable_efer_bits(u64 mask)
{
        efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);


/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
        return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
        return kvm_set_msr(vcpu, index, *data);
}

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
        static int version;
        struct pvclock_wall_clock wc;
        struct timespec now, sys, boot;

        if (!wall_clock)
                return;

        version++;

        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

        /*
         * The guest calculates current wall clock time by adding
         * system time (updated by kvm_write_guest_time below) to the
         * wall clock specified here.  Guest system time equals host
         * system time for us, thus we must fill in host boot time here.
         */
        now = current_kernel_time();
        ktime_get_ts(&sys);
        boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));

        wc.sec = boot.tv_sec;
        wc.nsec = boot.tv_nsec;
        wc.version = version;

        kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

        version++;
        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

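/*
 * Editor's sketch of the consumer side (assumption, not part of the
 * original file): a guest recovers wall clock time roughly as
 *
 *   wall_time = wc.sec/wc.nsec          (host boot time, written above)
 *             + pvclock system time     (ns since boot, maintained by
 *                                        kvm_write_guest_time() below)
 *
 * rereading wc until it observes an even, unchanged wc.version.
 */
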
static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
        uint32_t quotient, remainder;

        /* Don't try to replace with do_div(), this one calculates
         * "(dividend << 32) / divisor" */
        __asm__ ( "divl %4"
                  : "=a" (quotient), "=d" (remainder)
                  : "0" (0), "1" (dividend), "r" (divisor) );
        return quotient;
}

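/*
 * Editor's worked example (not part of the original file): div_frac()
 * yields the 0.32 fixed-point fraction dividend/divisor, e.g.
 *
 *   div_frac(1000000000, 2000000000)
 *       == (1000000000ULL << 32) / 2000000000 == 0x80000000
 *
 * i.e. 0.5 scaled by 2^32.
 */
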
static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
        uint64_t nsecs = 1000000000LL;
        int32_t  shift = 0;
        uint64_t tps64;
        uint32_t tps32;

        tps64 = tsc_khz * 1000LL;
        while (tps64 > nsecs*2) {
                tps64 >>= 1;
                shift--;
        }

        tps32 = (uint32_t)tps64;
        while (tps32 <= (uint32_t)nsecs) {
                tps32 <<= 1;
                shift++;
        }

        hv_clock->tsc_shift = shift;
        hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

        pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
                 __func__, tsc_khz, hv_clock->tsc_shift,
                 hv_clock->tsc_to_system_mul);
}

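/*
 * Editor's worked example (not part of the original file): for a 2 GHz
 * TSC, tsc_khz = 2000000 and tps64 = 2000000000.  Neither loop fires
 * (1e9 < 2e9 <= 2 * 1e9), so tsc_shift = 0 and
 *
 *   tsc_to_system_mul = div_frac(1000000000, 2000000000) = 0x80000000
 *
 * giving guest ns = (tsc_delta * 0x80000000) >> 32 = tsc_delta / 2.
 */
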
static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

static void kvm_write_guest_time(struct kvm_vcpu *v)
{
        struct timespec ts;
        unsigned long flags;
        struct kvm_vcpu_arch *vcpu = &v->arch;
        void *shared_kaddr;
        unsigned long this_tsc_khz;

        if ((!vcpu->time_page))
                return;

        this_tsc_khz = get_cpu_var(cpu_tsc_khz);
        if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
                kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
                vcpu->hv_clock_tsc_khz = this_tsc_khz;
        }
        put_cpu_var(cpu_tsc_khz);

        /* Keep irq disabled to prevent changes to the clock */
        local_irq_save(flags);
        kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
                    &vcpu->hv_clock.tsc_timestamp);
        ktime_get_ts(&ts);
        local_irq_restore(flags);

        /* With all the info we got, fill in the values */

        vcpu->hv_clock.system_time = ts.tv_nsec +
                                     (NSEC_PER_SEC * (u64)ts.tv_sec);
        /*
         * The interface expects us to write an even number signaling that the
         * update is finished. Since the guest won't see the intermediate
         * state, we just increase by 2 at the end.
         */
        vcpu->hv_clock.version += 2;

        shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

        memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
               sizeof(vcpu->hv_clock));

        kunmap_atomic(shared_kaddr, KM_USER0);

        mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}

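/*
 * Editor's sketch (assumption, not from this file): the even/odd version
 * dance above is a seqlock.  A guest-side reader would retry along the
 * lines of (read_scaled_time() being a hypothetical helper):
 *
 *      do {
 *              version = hv_clock->version;
 *              rmb();
 *              ns = read_scaled_time(hv_clock);
 *              rmb();
 *      } while ((version & 1) || version != hv_clock->version);
 */
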
static int kvm_request_guest_time_update(struct kvm_vcpu *v)
{
        struct kvm_vcpu_arch *vcpu = &v->arch;

        if (!vcpu->time_page)
                return 0;
        set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
        return 1;
}

static bool msr_mtrr_valid(unsigned msr)
{
        switch (msr) {
        case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
        case MSR_MTRRfix64K_00000:
        case MSR_MTRRfix16K_80000:
        case MSR_MTRRfix16K_A0000:
        case MSR_MTRRfix4K_C0000:
        case MSR_MTRRfix4K_C8000:
        case MSR_MTRRfix4K_D0000:
        case MSR_MTRRfix4K_D8000:
        case MSR_MTRRfix4K_E0000:
        case MSR_MTRRfix4K_E8000:
        case MSR_MTRRfix4K_F0000:
        case MSR_MTRRfix4K_F8000:
        case MSR_MTRRdefType:
        case MSR_IA32_CR_PAT:
                return true;
        case 0x2f8:
                return true;
        }
        return false;
}

static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

        if (!msr_mtrr_valid(msr))
                return 1;

        if (msr == MSR_MTRRdefType) {
                vcpu->arch.mtrr_state.def_type = data;
                vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
        } else if (msr == MSR_MTRRfix64K_00000)
                p[0] = data;
        else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
                p[1 + msr - MSR_MTRRfix16K_80000] = data;
        else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
                p[3 + msr - MSR_MTRRfix4K_C0000] = data;
        else if (msr == MSR_IA32_CR_PAT)
                vcpu->arch.pat = data;
        else {  /* Variable MTRRs */
                int idx, is_mtrr_mask;
                u64 *pt;

                idx = (msr - 0x200) / 2;
                is_mtrr_mask = msr - 0x200 - 2 * idx;
                if (!is_mtrr_mask)
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
                else
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
                *pt = data;
        }

        kvm_mmu_reset_context(vcpu);
        return 0;
}

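/*
 * Editor's illustration (not part of the original file): variable-range
 * MTRRs are base/mask pairs starting at MSR 0x200.  For msr = 0x203:
 *
 *   idx          = (0x203 - 0x200) / 2    = 1
 *   is_mtrr_mask = 0x203 - 0x200 - 2 * 1  = 1
 *
 * i.e. MSR 0x203 is the mask half (MTRRphysMask1) of variable range 1.
 */
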
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        switch (msr) {
        case MSR_EFER:
                set_efer(vcpu, data);
                break;
        case MSR_IA32_MC0_STATUS:
                pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
                          __func__, data);
                break;
        case MSR_IA32_MCG_STATUS:
                pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
                          __func__, data);
                break;
        case MSR_IA32_MCG_CTL:
                pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
                          __func__, data);
                break;
        case MSR_IA32_DEBUGCTLMSR:
                if (!data) {
                        /* We support the non-activated case already */
                        break;
                } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
                        /* Values other than LBR and BTF are vendor-specific,
                           thus reserved and should throw a #GP */
                        return 1;
                }
                pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
                          __func__, data);
                break;
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_UCODE_WRITE:
        case MSR_VM_HSAVE_PA:
                break;
        case 0x200 ... 0x2ff:
                return set_msr_mtrr(vcpu, msr, data);
        case MSR_IA32_APICBASE:
                kvm_set_apic_base(vcpu, data);
                break;
        case MSR_IA32_MISC_ENABLE:
                vcpu->arch.ia32_misc_enable_msr = data;
                break;
        case MSR_KVM_WALL_CLOCK:
                vcpu->kvm->arch.wall_clock = data;
                kvm_write_wall_clock(vcpu->kvm, data);
                break;
        case MSR_KVM_SYSTEM_TIME: {
                if (vcpu->arch.time_page) {
                        kvm_release_page_dirty(vcpu->arch.time_page);
                        vcpu->arch.time_page = NULL;
                }

                vcpu->arch.time = data;

                /* we verify if the enable bit is set... */
                if (!(data & 1))
                        break;

                /* ...but clean it before doing the actual write */
                vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

                vcpu->arch.time_page =
                                gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);

                if (is_error_page(vcpu->arch.time_page)) {
                        kvm_release_page_clean(vcpu->arch.time_page);
                        vcpu->arch.time_page = NULL;
                }

                kvm_request_guest_time_update(vcpu);
                break;
        }
        default:
                pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);


/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
        return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

        if (!msr_mtrr_valid(msr))
                return 1;

        if (msr == MSR_MTRRdefType)
                *pdata = vcpu->arch.mtrr_state.def_type +
                         (vcpu->arch.mtrr_state.enabled << 10);
        else if (msr == MSR_MTRRfix64K_00000)
                *pdata = p[0];
        else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
                *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
        else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
                *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
        else if (msr == MSR_IA32_CR_PAT)
                *pdata = vcpu->arch.pat;
        else {  /* Variable MTRRs */
                int idx, is_mtrr_mask;
                u64 *pt;

                idx = (msr - 0x200) / 2;
                is_mtrr_mask = msr - 0x200 - 2 * idx;
                if (!is_mtrr_mask)
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
                else
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
                *pdata = *pt;
        }

        return 0;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data;

        switch (msr) {
        case 0xc0010010: /* SYSCFG */
        case 0xc0010015: /* HWCR */
        case MSR_IA32_PLATFORM_ID:
        case MSR_IA32_P5_MC_ADDR:
        case MSR_IA32_P5_MC_TYPE:
        case MSR_IA32_MC0_CTL:
        case MSR_IA32_MCG_STATUS:
        case MSR_IA32_MCG_CAP:
        case MSR_IA32_MCG_CTL:
        case MSR_IA32_MC0_MISC:
        case MSR_IA32_MC0_MISC+4:
        case MSR_IA32_MC0_MISC+8:
        case MSR_IA32_MC0_MISC+12:
        case MSR_IA32_MC0_MISC+16:
        case MSR_IA32_MC0_MISC+20:
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_EBL_CR_POWERON:
        case MSR_IA32_DEBUGCTLMSR:
        case MSR_IA32_LASTBRANCHFROMIP:
        case MSR_IA32_LASTBRANCHTOIP:
        case MSR_IA32_LASTINTFROMIP:
        case MSR_IA32_LASTINTTOIP:
        case MSR_VM_HSAVE_PA:
        case MSR_P6_EVNTSEL0:
        case MSR_P6_EVNTSEL1:
                data = 0;
                break;
        case MSR_MTRRcap:
                data = 0x500 | KVM_NR_VAR_MTRR;
                break;
        case 0x200 ... 0x2ff:
                return get_msr_mtrr(vcpu, msr, pdata);
        case 0xcd: /* fsb frequency */
                data = 3;
                break;
        case MSR_IA32_APICBASE:
                data = kvm_get_apic_base(vcpu);
                break;
        case MSR_IA32_MISC_ENABLE:
                data = vcpu->arch.ia32_misc_enable_msr;
                break;
        case MSR_IA32_PERF_STATUS:
                /* TSC increment by tick */
                data = 1000ULL;
                /* CPU multiplier */
                data |= (((uint64_t)4ULL) << 40);
                break;
        case MSR_EFER:
                data = vcpu->arch.shadow_efer;
                break;
        case MSR_KVM_WALL_CLOCK:
                data = vcpu->kvm->arch.wall_clock;
                break;
        case MSR_KVM_SYSTEM_TIME:
                data = vcpu->arch.time;
                break;
        default:
                pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }
        *pdata = data;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
                    struct kvm_msr_entry *entries,
                    int (*do_msr)(struct kvm_vcpu *vcpu,
                                  unsigned index, u64 *data))
{
        int i;

        vcpu_load(vcpu);

        down_read(&vcpu->kvm->slots_lock);
        for (i = 0; i < msrs->nmsrs; ++i)
                if (do_msr(vcpu, entries[i].index, &entries[i].data))
                        break;
        up_read(&vcpu->kvm->slots_lock);

        vcpu_put(vcpu);

        return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
                  int (*do_msr)(struct kvm_vcpu *vcpu,
                                unsigned index, u64 *data),
                  int writeback)
{
        struct kvm_msrs msrs;
        struct kvm_msr_entry *entries;
        int r, n;
        unsigned size;

        r = -EFAULT;
        if (copy_from_user(&msrs, user_msrs, sizeof msrs))
                goto out;

        r = -E2BIG;
        if (msrs.nmsrs >= MAX_IO_MSRS)
                goto out;

        r = -ENOMEM;
        size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
        entries = vmalloc(size);
        if (!entries)
                goto out;

        r = -EFAULT;
        if (copy_from_user(entries, user_msrs->entries, size))
                goto out_free;

        r = n = __msr_io(vcpu, &msrs, entries, do_msr);
        if (r < 0)
                goto out_free;

        r = -EFAULT;
        if (writeback && copy_to_user(user_msrs->entries, entries, size))
                goto out_free;

        r = n;

out_free:
        vfree(entries);
out:
        return r;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_IRQCHIP:
        case KVM_CAP_HLT:
        case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
        case KVM_CAP_SET_TSS_ADDR:
        case KVM_CAP_EXT_CPUID:
        case KVM_CAP_CLOCKSOURCE:
        case KVM_CAP_PIT:
        case KVM_CAP_NOP_IO_DELAY:
        case KVM_CAP_MP_STATE:
        case KVM_CAP_SYNC_MMU:
        case KVM_CAP_REINJECT_CONTROL:
        case KVM_CAP_IRQ_INJECT_STATUS:
        case KVM_CAP_ASSIGN_DEV_IRQ:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
        case KVM_CAP_VAPIC:
                r = !kvm_x86_ops->cpu_has_accelerated_tpr();
                break;
        case KVM_CAP_NR_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_MEMORY_SLOTS;
                break;
        case KVM_CAP_PV_MMU:
                r = !tdp_enabled;
                break;
        case KVM_CAP_IOMMU:
                r = iommu_found();
                break;
        default:
                r = 0;
                break;
        }
        return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_GET_MSR_INDEX_LIST: {
                struct kvm_msr_list __user *user_msr_list = argp;
                struct kvm_msr_list msr_list;
                unsigned n;

                r = -EFAULT;
                if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
                        goto out;
                n = msr_list.nmsrs;
                msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
                if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
                        goto out;
                r = -E2BIG;
                if (n < num_msrs_to_save)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(user_msr_list->indices, &msrs_to_save,
                                 num_msrs_to_save * sizeof(u32)))
                        goto out;
                if (copy_to_user(user_msr_list->indices
                                 + num_msrs_to_save * sizeof(u32),
                                 &emulated_msrs,
                                 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
                        goto out;
                r = 0;
                break;
        }
        case KVM_GET_SUPPORTED_CPUID: {
                struct kvm_cpuid2 __user *cpuid_arg = argp;
                struct kvm_cpuid2 cpuid;

                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
                        goto out;
                r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
                                                      cpuid_arg->entries);
                if (r)
                        goto out;

                r = -EFAULT;
                if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
                        goto out;
                r = 0;
                break;
        }
        default:
                r = -EINVAL;
        }
out:
        return r;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        kvm_x86_ops->vcpu_load(vcpu, cpu);
        kvm_request_guest_time_update(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops->vcpu_put(vcpu);
        kvm_put_guest_fpu(vcpu);
}

static int is_efer_nx(void)
{
        unsigned long long efer = 0;

        rdmsrl_safe(MSR_EFER, &efer);
        return efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_cpuid_entry2 *e, *entry;

        entry = NULL;
        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                e = &vcpu->arch.cpuid_entries[i];
                if (e->function == 0x80000001) {
                        entry = e;
                        break;
                }
        }
        if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
                entry->edx &= ~(1 << 20);
                printk(KERN_INFO "kvm: guest NX capability removed\n");
        }
}

/* when an old userspace process fills a new kernel module */
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                                    struct kvm_cpuid *cpuid,
                                    struct kvm_cpuid_entry __user *entries)
{
        int r, i;
        struct kvm_cpuid_entry *cpuid_entries;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -ENOMEM;
        cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
        if (!cpuid_entries)
                goto out;
        r = -EFAULT;
        if (copy_from_user(cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry)))
                goto out_free;
        for (i = 0; i < cpuid->nent; i++) {
                vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
                vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
                vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
                vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
                vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
                vcpu->arch.cpuid_entries[i].index = 0;
                vcpu->arch.cpuid_entries[i].flags = 0;
                vcpu->arch.cpuid_entries[i].padding[0] = 0;
                vcpu->arch.cpuid_entries[i].padding[1] = 0;
                vcpu->arch.cpuid_entries[i].padding[2] = 0;
        }
        vcpu->arch.cpuid_nent = cpuid->nent;
        cpuid_fix_nx_cap(vcpu);
        r = 0;

out_free:
        vfree(cpuid_entries);
out:
        return r;
}

1199
1200static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
19355475
AS
1201 struct kvm_cpuid2 *cpuid,
1202 struct kvm_cpuid_entry2 __user *entries)
313a3dc7
CO
1203{
1204 int r;
1205
1206 r = -E2BIG;
1207 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1208 goto out;
1209 r = -EFAULT;
ad312c7c 1210 if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
07716717 1211 cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
313a3dc7 1212 goto out;
ad312c7c 1213 vcpu->arch.cpuid_nent = cpuid->nent;
313a3dc7
CO
1214 return 0;
1215
1216out:
1217 return r;
1218}
1219
07716717 1220static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
19355475
AS
1221 struct kvm_cpuid2 *cpuid,
1222 struct kvm_cpuid_entry2 __user *entries)
07716717
DK
1223{
1224 int r;
1225
1226 r = -E2BIG;
ad312c7c 1227 if (cpuid->nent < vcpu->arch.cpuid_nent)
07716717
DK
1228 goto out;
1229 r = -EFAULT;
ad312c7c 1230 if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
19355475 1231 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
07716717
DK
1232 goto out;
1233 return 0;
1234
1235out:
ad312c7c 1236 cpuid->nent = vcpu->arch.cpuid_nent;
07716717
DK
1237 return r;
1238}
1239
static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                           u32 index)
{
        entry->function = function;
        entry->index = index;
        cpuid_count(entry->function, entry->index,
                    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
        entry->flags = 0;
}

1249
1250static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1251 u32 index, int *nent, int maxnent)
1252{
1253 const u32 kvm_supported_word0_x86_features = bit(X86_FEATURE_FPU) |
1254 bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
1255 bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
1256 bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
8d753f36 1257 bit(X86_FEATURE_MCE) |
07716717 1258 bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
8d753f36
AK
1259 bit(X86_FEATURE_SEP) | bit(X86_FEATURE_MTRR) |
1260 bit(X86_FEATURE_PGE) | bit(X86_FEATURE_MCA) |
1261 bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PAT) |
1262 bit(X86_FEATURE_PSE36) |
07716717
DK
1263 bit(X86_FEATURE_CLFLSH) | bit(X86_FEATURE_MMX) |
1264 bit(X86_FEATURE_FXSR) | bit(X86_FEATURE_XMM) |
1265 bit(X86_FEATURE_XMM2) | bit(X86_FEATURE_SELFSNOOP);
1266 const u32 kvm_supported_word1_x86_features = bit(X86_FEATURE_FPU) |
1267 bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
1268 bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
1269 bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
1270 bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
1271 bit(X86_FEATURE_PGE) |
1272 bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
1273 bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) |
1274 bit(X86_FEATURE_SYSCALL) |
334b8ad7 1275 (is_efer_nx() ? bit(X86_FEATURE_NX) : 0) |
07716717
DK
1276#ifdef CONFIG_X86_64
1277 bit(X86_FEATURE_LM) |
1278#endif
1b2fd70c 1279 bit(X86_FEATURE_FXSR_OPT) |
07716717
DK
1280 bit(X86_FEATURE_MMXEXT) |
1281 bit(X86_FEATURE_3DNOWEXT) |
1282 bit(X86_FEATURE_3DNOW);
1283 const u32 kvm_supported_word3_x86_features =
1284 bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16);
1285 const u32 kvm_supported_word6_x86_features =
d8017474
AG
1286 bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY) |
1287 bit(X86_FEATURE_SVM);
07716717 1288
19355475 1289 /* all calls to cpuid_count() should be made on the same cpu */
07716717
DK
1290 get_cpu();
1291 do_cpuid_1_ent(entry, function, index);
1292 ++*nent;
1293
1294 switch (function) {
1295 case 0:
1296 entry->eax = min(entry->eax, (u32)0xb);
1297 break;
1298 case 1:
1299 entry->edx &= kvm_supported_word0_x86_features;
1300 entry->ecx &= kvm_supported_word3_x86_features;
1301 break;
1302 /* function 2 entries are STATEFUL. That is, repeated cpuid commands
1303 * may return different values. This forces us to get_cpu() before
1304 * issuing the first command, and also to emulate this annoying behavior
1305 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
1306 case 2: {
1307 int t, times = entry->eax & 0xff;
1308
1309 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
0fdf8e59 1310 entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
07716717
DK
1311 for (t = 1; t < times && *nent < maxnent; ++t) {
1312 do_cpuid_1_ent(&entry[t], function, 0);
1313 entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
1314 ++*nent;
1315 }
1316 break;
1317 }
1318 /* function 4 and 0xb have additional index. */
1319 case 4: {
14af3f3c 1320 int i, cache_type;
07716717
DK
1321
1322 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1323 /* read more entries until cache_type is zero */
14af3f3c
HH
1324 for (i = 1; *nent < maxnent; ++i) {
1325 cache_type = entry[i - 1].eax & 0x1f;
07716717
DK
1326 if (!cache_type)
1327 break;
14af3f3c
HH
1328 do_cpuid_1_ent(&entry[i], function, i);
1329 entry[i].flags |=
07716717
DK
1330 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1331 ++*nent;
1332 }
1333 break;
1334 }
1335 case 0xb: {
14af3f3c 1336 int i, level_type;
07716717
DK
1337
1338 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1339 /* read more entries until level_type is zero */
14af3f3c 1340 for (i = 1; *nent < maxnent; ++i) {
0853d2c1 1341 level_type = entry[i - 1].ecx & 0xff00;
07716717
DK
1342 if (!level_type)
1343 break;
14af3f3c
HH
1344 do_cpuid_1_ent(&entry[i], function, i);
1345 entry[i].flags |=
07716717
DK
1346 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1347 ++*nent;
1348 }
1349 break;
1350 }
1351 case 0x80000000:
1352 entry->eax = min(entry->eax, 0x8000001a);
1353 break;
1354 case 0x80000001:
1355 entry->edx &= kvm_supported_word1_x86_features;
1356 entry->ecx &= kvm_supported_word6_x86_features;
1357 break;
1358 }
1359 put_cpu();
1360}
1361
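/*
 * Editor's note (illustration, not part of the original file): for the
 * stateful leaf 2 above, the low byte of EAX reports how many times
 * CPUID(2) must be executed; contemporary Intel CPUs return 1 there, so
 * times = 1 and the loop adds no extra entries.  A hypothetical low
 * byte of 3 would generate two further entries, each flagged
 * KVM_CPUID_FLAG_STATEFUL_FUNC.
 */
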
static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
                                      struct kvm_cpuid_entry2 __user *entries)
{
        struct kvm_cpuid_entry2 *cpuid_entries;
        int limit, nent = 0, r = -E2BIG;
        u32 func;

        if (cpuid->nent < 1)
                goto out;
        r = -ENOMEM;
        cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
        if (!cpuid_entries)
                goto out;

        do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
        limit = cpuid_entries[0].eax;
        for (func = 1; func <= limit && nent < cpuid->nent; ++func)
                do_cpuid_ent(&cpuid_entries[nent], func, 0,
                             &nent, cpuid->nent);
        r = -E2BIG;
        if (nent >= cpuid->nent)
                goto out_free;

        do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
        limit = cpuid_entries[nent - 1].eax;
        for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
                do_cpuid_ent(&cpuid_entries[nent], func, 0,
                             &nent, cpuid->nent);
        r = -EFAULT;
        if (copy_to_user(entries, cpuid_entries,
                         nent * sizeof(struct kvm_cpuid_entry2)))
                goto out_free;
        cpuid->nent = nent;
        r = 0;

out_free:
        vfree(cpuid_entries);
out:
        return r;
}

static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
                                    struct kvm_lapic_state *s)
{
        vcpu_load(vcpu);
        memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
        vcpu_put(vcpu);

        return 0;
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
                                    struct kvm_lapic_state *s)
{
        vcpu_load(vcpu);
        memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
        kvm_apic_post_state_restore(vcpu);
        vcpu_put(vcpu);

        return 0;
}

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
                                    struct kvm_interrupt *irq)
{
        if (irq->irq < 0 || irq->irq >= 256)
                return -EINVAL;
        if (irqchip_in_kernel(vcpu->kvm))
                return -ENXIO;
        vcpu_load(vcpu);

        set_bit(irq->irq, vcpu->arch.irq_pending);
        set_bit(irq->irq / BITS_PER_LONG, &vcpu->arch.irq_summary);

        vcpu_put(vcpu);

        return 0;
}

static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        kvm_inject_nmi(vcpu);
        vcpu_put(vcpu);

        return 0;
}

static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
                                           struct kvm_tpr_access_ctl *tac)
{
        if (tac->flags)
                return -EINVAL;
        vcpu->arch.tpr_access_reporting = !!tac->enabled;
        return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;
        struct kvm_lapic_state *lapic = NULL;

        switch (ioctl) {
        case KVM_GET_LAPIC: {
                lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);

                r = -ENOMEM;
                if (!lapic)
                        goto out;
                r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_LAPIC: {
                lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
                r = -ENOMEM;
                if (!lapic)
                        goto out;
                r = -EFAULT;
                if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
                        goto out;
                r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;

                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof irq))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_NMI: {
                r = kvm_vcpu_ioctl_nmi(vcpu);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_CPUID: {
                struct kvm_cpuid __user *cpuid_arg = argp;
                struct kvm_cpuid cpuid;

                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
                        goto out;
                r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
                if (r)
                        goto out;
                break;
        }
        case KVM_SET_CPUID2: {
                struct kvm_cpuid2 __user *cpuid_arg = argp;
                struct kvm_cpuid2 cpuid;

                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
                        goto out;
                r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
                                              cpuid_arg->entries);
                if (r)
                        goto out;
                break;
        }
        case KVM_GET_CPUID2: {
                struct kvm_cpuid2 __user *cpuid_arg = argp;
                struct kvm_cpuid2 cpuid;

                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
                        goto out;
                r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
                                              cpuid_arg->entries);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
                        goto out;
                r = 0;
                break;
        }
        case KVM_GET_MSRS:
                r = msr_io(vcpu, argp, kvm_get_msr, 1);
                break;
        case KVM_SET_MSRS:
                r = msr_io(vcpu, argp, do_set_msr, 0);
                break;
        case KVM_TPR_ACCESS_REPORTING: {
                struct kvm_tpr_access_ctl tac;

                r = -EFAULT;
                if (copy_from_user(&tac, argp, sizeof tac))
                        goto out;
                r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &tac, sizeof tac))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_VAPIC_ADDR: {
                struct kvm_vapic_addr va;

                r = -EINVAL;
                if (!irqchip_in_kernel(vcpu->kvm))
                        goto out;
                r = -EFAULT;
                if (copy_from_user(&va, argp, sizeof va))
                        goto out;
                r = 0;
                kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
                break;
        }
        default:
                r = -EINVAL;
        }
out:
        kfree(lapic);
        return r;
}

static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
        int ret;

        if (addr > (unsigned int)(-3 * PAGE_SIZE))
                return -1;
        ret = kvm_x86_ops->set_tss_addr(kvm, addr);
        return ret;
}

static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
                                         u32 kvm_nr_mmu_pages)
{
        if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
                return -EINVAL;

        down_write(&kvm->slots_lock);

        kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
        kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;

        up_write(&kvm->slots_lock);
        return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
        return kvm->arch.n_alloc_mmu_pages;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        int i;
        struct kvm_mem_alias *alias;

        for (i = 0; i < kvm->arch.naliases; ++i) {
                alias = &kvm->arch.aliases[i];
                if (gfn >= alias->base_gfn
                    && gfn < alias->base_gfn + alias->npages)
                        return alias->target_gfn + gfn - alias->base_gfn;
        }
        return gfn;
}

/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
                                         struct kvm_memory_alias *alias)
{
        int r, n;
        struct kvm_mem_alias *p;

        r = -EINVAL;
        /* General sanity checks */
        if (alias->memory_size & (PAGE_SIZE - 1))
                goto out;
        if (alias->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
        if (alias->slot >= KVM_ALIAS_SLOTS)
                goto out;
        if (alias->guest_phys_addr + alias->memory_size
            < alias->guest_phys_addr)
                goto out;
        if (alias->target_phys_addr + alias->memory_size
            < alias->target_phys_addr)
                goto out;

        down_write(&kvm->slots_lock);
        spin_lock(&kvm->mmu_lock);

        p = &kvm->arch.aliases[alias->slot];
        p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
        p->npages = alias->memory_size >> PAGE_SHIFT;
        p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

        for (n = KVM_ALIAS_SLOTS; n > 0; --n)
                if (kvm->arch.aliases[n - 1].npages)
                        break;
        kvm->arch.naliases = n;

        spin_unlock(&kvm->mmu_lock);
        kvm_mmu_zap_all(kvm);

        up_write(&kvm->slots_lock);

        return 0;

out:
        return r;
}

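/*
 * Editor's example (assumption, not part of the original file): to alias
 * the legacy VGA window onto framebuffer memory that was placed at, say,
 * 0xf0000000, userspace would pass roughly
 *
 *   alias.guest_phys_addr  = 0xa0000;      (page aligned)
 *   alias.memory_size      = 0x20000;      (128K, page aligned)
 *   alias.target_phys_addr = 0xf0000000;
 *
 * after which unalias_gfn() folds guest frames 0xa0..0xbf onto the
 * target region.
 */
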
static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
        int r;

        r = 0;
        switch (chip->chip_id) {
        case KVM_IRQCHIP_PIC_MASTER:
                memcpy(&chip->chip.pic,
                       &pic_irqchip(kvm)->pics[0],
                       sizeof(struct kvm_pic_state));
                break;
        case KVM_IRQCHIP_PIC_SLAVE:
                memcpy(&chip->chip.pic,
                       &pic_irqchip(kvm)->pics[1],
                       sizeof(struct kvm_pic_state));
                break;
        case KVM_IRQCHIP_IOAPIC:
                memcpy(&chip->chip.ioapic,
                       ioapic_irqchip(kvm),
                       sizeof(struct kvm_ioapic_state));
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
        int r;

        r = 0;
        switch (chip->chip_id) {
        case KVM_IRQCHIP_PIC_MASTER:
                memcpy(&pic_irqchip(kvm)->pics[0],
                       &chip->chip.pic,
                       sizeof(struct kvm_pic_state));
                break;
        case KVM_IRQCHIP_PIC_SLAVE:
                memcpy(&pic_irqchip(kvm)->pics[1],
                       &chip->chip.pic,
                       sizeof(struct kvm_pic_state));
                break;
        case KVM_IRQCHIP_IOAPIC:
                memcpy(ioapic_irqchip(kvm),
                       &chip->chip.ioapic,
                       sizeof(struct kvm_ioapic_state));
                break;
        default:
                r = -EINVAL;
                break;
        }
        kvm_pic_update_irq(pic_irqchip(kvm));
        return r;
}

static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
        int r = 0;

        memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
        return r;
}

static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
        int r = 0;

        memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
        kvm_pit_load_count(kvm, 0, ps->channels[0].count);
        return r;
}

static int kvm_vm_ioctl_reinject(struct kvm *kvm,
                                 struct kvm_reinject_control *control)
{
        if (!kvm->arch.vpit)
                return -ENXIO;
        kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
        return 0;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        int r;
        int n;
        struct kvm_memory_slot *memslot;
        int is_dirty = 0;

        down_write(&kvm->slots_lock);

        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* If nothing is dirty, don't bother messing with page tables. */
        if (is_dirty) {
                kvm_mmu_slot_remove_write_access(kvm, log->slot);
                kvm_flush_remote_tlbs(kvm);
                memslot = &kvm->memslots[log->slot];
                n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
                memset(memslot->dirty_bitmap, 0, n);
        }
        r = 0;
out:
        up_write(&kvm->slots_lock);
        return r;
}

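/*
 * Editor's worked example (not part of the original file): the dirty
 * bitmap carries one bit per page, rounded up to whole longs.  For a
 * 1000-page slot on a 64-bit host:
 *
 *   n = ALIGN(1000, 64) / 8 = 1024 / 8 = 128 bytes
 */
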
1fe779f8
CO
1808long kvm_arch_vm_ioctl(struct file *filp,
1809 unsigned int ioctl, unsigned long arg)
1810{
1811 struct kvm *kvm = filp->private_data;
1812 void __user *argp = (void __user *)arg;
1813 int r = -EINVAL;
f0d66275
DH
1814 /*
1815 * This union makes it completely explicit to gcc-3.x
1816 * that these two variables' stack usage should be
1817 * combined, not added together.
1818 */
1819 union {
1820 struct kvm_pit_state ps;
1821 struct kvm_memory_alias alias;
1822 } u;
1fe779f8
CO
1823
1824 switch (ioctl) {
1825 case KVM_SET_TSS_ADDR:
1826 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
1827 if (r < 0)
1828 goto out;
1829 break;
1830 case KVM_SET_MEMORY_REGION: {
1831 struct kvm_memory_region kvm_mem;
1832 struct kvm_userspace_memory_region kvm_userspace_mem;
1833
1834 r = -EFAULT;
1835 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
1836 goto out;
1837 kvm_userspace_mem.slot = kvm_mem.slot;
1838 kvm_userspace_mem.flags = kvm_mem.flags;
1839 kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
1840 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
1841 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
1842 if (r)
1843 goto out;
1844 break;
1845 }
1846 case KVM_SET_NR_MMU_PAGES:
1847 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
1848 if (r)
1849 goto out;
1850 break;
1851 case KVM_GET_NR_MMU_PAGES:
1852 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
1853 break;
f0d66275 1854 case KVM_SET_MEMORY_ALIAS:
1fe779f8 1855 r = -EFAULT;
f0d66275 1856 if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
1fe779f8 1857 goto out;
f0d66275 1858 r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
1fe779f8
CO
1859 if (r)
1860 goto out;
1861 break;
1fe779f8
CO
1862 case KVM_CREATE_IRQCHIP:
1863 r = -ENOMEM;
d7deeeb0
ZX
1864 kvm->arch.vpic = kvm_create_pic(kvm);
1865 if (kvm->arch.vpic) {
1fe779f8
CO
1866 r = kvm_ioapic_init(kvm);
1867 if (r) {
d7deeeb0
ZX
1868 kfree(kvm->arch.vpic);
1869 kvm->arch.vpic = NULL;
1fe779f8
CO
1870 goto out;
1871 }
1872 } else
1873 goto out;
399ec807
AK
1874 r = kvm_setup_default_irq_routing(kvm);
1875 if (r) {
1876 kfree(kvm->arch.vpic);
1877 kfree(kvm->arch.vioapic);
1878 goto out;
1879 }
1fe779f8 1880 break;
7837699f 1881 case KVM_CREATE_PIT:
269e05e4
AK
1882 mutex_lock(&kvm->lock);
1883 r = -EEXIST;
1884 if (kvm->arch.vpit)
1885 goto create_pit_unlock;
7837699f
SY
1886 r = -ENOMEM;
1887 kvm->arch.vpit = kvm_create_pit(kvm);
1888 if (kvm->arch.vpit)
1889 r = 0;
1890 create_pit_unlock:
1891 mutex_unlock(&kvm->lock);
1892 break;
1893 case KVM_IRQ_LINE_STATUS:
1894 case KVM_IRQ_LINE: {
1895 struct kvm_irq_level irq_event;
1896
1897 r = -EFAULT;
1898 if (copy_from_user(&irq_event, argp, sizeof irq_event))
1899 goto out;
1900 if (irqchip_in_kernel(kvm)) {
1901 __s32 status;
1902 mutex_lock(&kvm->lock);
1903 status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
1904 irq_event.irq, irq_event.level);
1905 mutex_unlock(&kvm->lock);
1906 if (ioctl == KVM_IRQ_LINE_STATUS) {
1907 irq_event.status = status;
1908 if (copy_to_user(argp, &irq_event,
1909 sizeof irq_event))
1910 goto out;
1911 }
1912 r = 0;
1913 }
1914 break;
1915 }
1916 case KVM_GET_IRQCHIP: {
1917 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
1918 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
1919
1920 r = -ENOMEM;
1921 if (!chip)
1922 goto out;
1923 r = -EFAULT;
1924 if (copy_from_user(chip, argp, sizeof *chip))
1925 goto get_irqchip_out;
1926 r = -ENXIO;
1927 if (!irqchip_in_kernel(kvm))
1928 goto get_irqchip_out;
1929 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
1930 if (r)
1931 goto get_irqchip_out;
1932 r = -EFAULT;
1933 if (copy_to_user(argp, chip, sizeof *chip))
1934 goto get_irqchip_out;
1935 r = 0;
1936 get_irqchip_out:
1937 kfree(chip);
1938 if (r)
1939 goto out;
1940 break;
1941 }
1942 case KVM_SET_IRQCHIP: {
1943 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
1944 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
1945
1946 r = -ENOMEM;
1947 if (!chip)
1948 goto out;
1949 r = -EFAULT;
1950 if (copy_from_user(chip, argp, sizeof *chip))
1951 goto set_irqchip_out;
1952 r = -ENXIO;
1953 if (!irqchip_in_kernel(kvm))
1954 goto set_irqchip_out;
1955 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
1956 if (r)
1957 goto set_irqchip_out;
1958 r = 0;
1959 set_irqchip_out:
1960 kfree(chip);
1961 if (r)
1962 goto out;
1963 break;
1964 }
1965 case KVM_GET_PIT: {
1966 r = -EFAULT;
1967 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
1968 goto out;
1969 r = -ENXIO;
1970 if (!kvm->arch.vpit)
1971 goto out;
1972 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
1973 if (r)
1974 goto out;
1975 r = -EFAULT;
1976 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
1977 goto out;
1978 r = 0;
1979 break;
1980 }
1981 case KVM_SET_PIT: {
1982 r = -EFAULT;
1983 if (copy_from_user(&u.ps, argp, sizeof u.ps))
1984 goto out;
1985 r = -ENXIO;
1986 if (!kvm->arch.vpit)
1987 goto out;
1988 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
1989 if (r)
1990 goto out;
1991 r = 0;
1992 break;
1993 }
1994 case KVM_REINJECT_CONTROL: {
1995 struct kvm_reinject_control control;
1996 r = -EFAULT;
1997 if (copy_from_user(&control, argp, sizeof(control)))
1998 goto out;
1999 r = kvm_vm_ioctl_reinject(kvm, &control);
2000 if (r)
2001 goto out;
2002 r = 0;
2003 break;
2004 }
2005 default:
2006 ;
2007 }
2008out:
2009 return r;
2010}
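/*
 * Illustrative userspace sketch (not part of the original file): how a
 * VMM might drive the KVM_IRQ_LINE case handled above. vm_fd is an
 * assumed, already-open VM file descriptor, and the VM is assumed to
 * have been created with KVM_CREATE_IRQCHIP.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int set_gsi_level(int vm_fd, unsigned int gsi, int level)
{
	struct kvm_irq_level irq_event = {
		.irq = gsi,	/* GSI number */
		.level = level,	/* 1 = assert, 0 = deassert */
	};

	return ioctl(vm_fd, KVM_IRQ_LINE, &irq_event);
}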
2011
2012static void kvm_init_msr_list(void)
2013{
2014 u32 dummy[2];
2015 unsigned i, j;
2016
2017 for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
2018 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
2019 continue;
2020 if (j < i)
2021 msrs_to_save[j] = msrs_to_save[i];
2022 j++;
2023 }
2024 num_msrs_to_save = j;
2025}
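/*
 * Illustrative sketch (not part of the original file): the loop above is
 * the in-place filter idiom -- i scans every candidate MSR, j counts the
 * survivors, and an entry is only copied down once a gap has opened. The
 * same pattern over a plain int array, with a hypothetical keep()
 * predicate (bool comes from <linux/types.h> in kernel code):
 */
static unsigned int filter_in_place(int *arr, unsigned int n, bool (*keep)(int))
{
	unsigned int i, j;

	for (i = j = 0; i < n; i++) {
		if (!keep(arr[i]))
			continue;
		if (j < i)
			arr[j] = arr[i];
		j++;
	}
	return j;	/* number of surviving entries */
}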
2026
2027/*
2028 * Only the APIC needs an MMIO device hook, so take a shortcut for now.
2029 */
2030static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
2031 gpa_t addr, int len,
2032 int is_write)
2033{
2034 struct kvm_io_device *dev;
2035
2036 if (vcpu->arch.apic) {
2037 dev = &vcpu->arch.apic->dev;
2038 if (dev->in_range(dev, addr, len, is_write))
2039 return dev;
2040 }
2041 return NULL;
2042}
2043
2044
2045static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
2046 gpa_t addr, int len,
2047 int is_write)
2048{
2049 struct kvm_io_device *dev;
2050
2051 dev = vcpu_find_pervcpu_dev(vcpu, addr, len, is_write);
2052 if (dev == NULL)
2053 dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len,
2054 is_write);
2055 return dev;
2056}
2057
2058static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
2059 struct kvm_vcpu *vcpu)
2060{
2061 void *data = val;
2062 int r = X86EMUL_CONTINUE;
2063
2064 while (bytes) {
2065 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2066 unsigned offset = addr & (PAGE_SIZE-1);
2067 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
2068 int ret;
2069
2070 if (gpa == UNMAPPED_GVA) {
2071 r = X86EMUL_PROPAGATE_FAULT;
2072 goto out;
2073 }
2074 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
2075 if (ret < 0) {
2076 r = X86EMUL_UNHANDLEABLE;
2077 goto out;
2078 }
2079
2080 bytes -= toread;
2081 data += toread;
2082 addr += toread;
2083 }
2084out:
2085 return r;
2086}
2087
2088static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
2089 struct kvm_vcpu *vcpu)
2090{
2091 void *data = val;
2092 int r = X86EMUL_CONTINUE;
2093
2094 while (bytes) {
2095 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2096 unsigned offset = addr & (PAGE_SIZE-1);
2097 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
2098 int ret;
2099
2100 if (gpa == UNMAPPED_GVA) {
2101 r = X86EMUL_PROPAGATE_FAULT;
2102 goto out;
2103 }
2104 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
2105 if (ret < 0) {
2106 r = X86EMUL_UNHANDLEABLE;
2107 goto out;
2108 }
2109
2110 bytes -= towrite;
2111 data += towrite;
2112 addr += towrite;
2113 }
2114out:
2115 return r;
2116}
2117
2118
2119static int emulator_read_emulated(unsigned long addr,
2120 void *val,
2121 unsigned int bytes,
2122 struct kvm_vcpu *vcpu)
2123{
2124 struct kvm_io_device *mmio_dev;
2125 gpa_t gpa;
2126
2127 if (vcpu->mmio_read_completed) {
2128 memcpy(val, vcpu->mmio_data, bytes);
2129 vcpu->mmio_read_completed = 0;
2130 return X86EMUL_CONTINUE;
2131 }
2132
2133 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2134
2135 /* For APIC access vmexit */
2136 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2137 goto mmio;
2138
2139 if (kvm_read_guest_virt(addr, val, bytes, vcpu)
2140 == X86EMUL_CONTINUE)
2141 return X86EMUL_CONTINUE;
2142 if (gpa == UNMAPPED_GVA)
2143 return X86EMUL_PROPAGATE_FAULT;
2144
2145mmio:
2146 /*
2147 * Is this MMIO handled locally?
2148 */
2149 mutex_lock(&vcpu->kvm->lock);
2150 mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 0);
2151 if (mmio_dev) {
2152 kvm_iodevice_read(mmio_dev, gpa, bytes, val);
2153 mutex_unlock(&vcpu->kvm->lock);
2154 return X86EMUL_CONTINUE;
2155 }
2156 mutex_unlock(&vcpu->kvm->lock);
2157
2158 vcpu->mmio_needed = 1;
2159 vcpu->mmio_phys_addr = gpa;
2160 vcpu->mmio_size = bytes;
2161 vcpu->mmio_is_write = 0;
2162
2163 return X86EMUL_UNHANDLEABLE;
2164}
2165
2166int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
2167 const void *val, int bytes)
2168{
2169 int ret;
2170
2171 ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
2172 if (ret < 0)
2173 return 0;
2174 kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
2175 return 1;
2176}
2177
2178static int emulator_write_emulated_onepage(unsigned long addr,
2179 const void *val,
2180 unsigned int bytes,
2181 struct kvm_vcpu *vcpu)
2182{
2183 struct kvm_io_device *mmio_dev;
2184 gpa_t gpa;
2185
2186 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2187
2188 if (gpa == UNMAPPED_GVA) {
2189 kvm_inject_page_fault(vcpu, addr, 2);
2190 return X86EMUL_PROPAGATE_FAULT;
2191 }
2192
2193 /* For APIC access vmexit */
2194 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2195 goto mmio;
2196
2197 if (emulator_write_phys(vcpu, gpa, val, bytes))
2198 return X86EMUL_CONTINUE;
2199
2200mmio:
2201 /*
2202 * Is this MMIO handled locally?
2203 */
2204 mutex_lock(&vcpu->kvm->lock);
2205 mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 1);
2206 if (mmio_dev) {
2207 kvm_iodevice_write(mmio_dev, gpa, bytes, val);
2208 mutex_unlock(&vcpu->kvm->lock);
2209 return X86EMUL_CONTINUE;
2210 }
2211 mutex_unlock(&vcpu->kvm->lock);
2212
2213 vcpu->mmio_needed = 1;
2214 vcpu->mmio_phys_addr = gpa;
2215 vcpu->mmio_size = bytes;
2216 vcpu->mmio_is_write = 1;
2217 memcpy(vcpu->mmio_data, val, bytes);
2218
2219 return X86EMUL_CONTINUE;
2220}
2221
2222int emulator_write_emulated(unsigned long addr,
2223 const void *val,
2224 unsigned int bytes,
2225 struct kvm_vcpu *vcpu)
2226{
2227 /* Crossing a page boundary? */
2228 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
2229 int rc, now;
2230
2231 now = -addr & ~PAGE_MASK;
2232 rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
2233 if (rc != X86EMUL_CONTINUE)
2234 return rc;
2235 addr += now;
2236 val += now;
2237 bytes -= now;
2238 }
2239 return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
2240}
2241EXPORT_SYMBOL_GPL(emulator_write_emulated);
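/*
 * Illustrative note (not part of the original file): in the splitter
 * above, "now = -addr & ~PAGE_MASK" is the byte count from addr to the
 * next page boundary. With 4K pages and an 8-byte write at addr =
 * 0x1ffc, -addr & 0xfff == 4, so the write is split into 4 bytes ending
 * at 0x2000 and 4 bytes starting there. An equivalent, more explicit
 * spelling for an unaligned addr:
 */
static inline unsigned long bytes_to_page_boundary(unsigned long addr)
{
	/* matches -addr & ~PAGE_MASK whenever addr is not page-aligned */
	return PAGE_SIZE - (addr & ~PAGE_MASK);
}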
2242
2243static int emulator_cmpxchg_emulated(unsigned long addr,
2244 const void *old,
2245 const void *new,
2246 unsigned int bytes,
2247 struct kvm_vcpu *vcpu)
2248{
2249 static int reported;
2250
2251 if (!reported) {
2252 reported = 1;
2253 printk(KERN_WARNING "kvm: emulating exchange as write\n");
2254 }
2255#ifndef CONFIG_X86_64
2256 /* a guest's cmpxchg8b has to be emulated atomically */
2257 if (bytes == 8) {
2258 gpa_t gpa;
2259 struct page *page;
2260 char *kaddr;
2261 u64 val;
2262
2263 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2264
2265 if (gpa == UNMAPPED_GVA ||
2266 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2267 goto emul_write;
2268
2269 if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
2270 goto emul_write;
2271
2272 val = *(u64 *)new;
2273
2274 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2275
2276 kaddr = kmap_atomic(page, KM_USER0);
2277 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
2278 kunmap_atomic(kaddr, KM_USER0);
2279 kvm_release_page_dirty(page);
2280 }
2281emul_write:
2282#endif
2283
2284 return emulator_write_emulated(addr, new, bytes, vcpu);
2285}
2286
2287static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
2288{
2289 return kvm_x86_ops->get_segment_base(vcpu, seg);
2290}
2291
2292int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
2293{
2294 kvm_mmu_invlpg(vcpu, address);
2295 return X86EMUL_CONTINUE;
2296}
2297
2298int emulate_clts(struct kvm_vcpu *vcpu)
2299{
2300 KVMTRACE_0D(CLTS, vcpu, handler);
2301 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
2302 return X86EMUL_CONTINUE;
2303}
2304
2305int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
2306{
2307 struct kvm_vcpu *vcpu = ctxt->vcpu;
2308
2309 switch (dr) {
2310 case 0 ... 3:
2311 *dest = kvm_x86_ops->get_dr(vcpu, dr);
2312 return X86EMUL_CONTINUE;
2313 default:
2314 pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
2315 return X86EMUL_UNHANDLEABLE;
2316 }
2317}
2318
2319int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
2320{
2321 unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
2322 int exception;
2323
2324 kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
2325 if (exception) {
2326 /* FIXME: better handling */
2327 return X86EMUL_UNHANDLEABLE;
2328 }
2329 return X86EMUL_CONTINUE;
2330}
2331
2332void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
2333{
2334 u8 opcodes[4];
2335 unsigned long rip = kvm_rip_read(vcpu);
2336 unsigned long rip_linear;
2337
2338 if (!printk_ratelimit())
2339 return;
2340
2341 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
2342
2343 kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
2344
2345 printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
2346 context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
2347}
2348EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
2349
2350static struct x86_emulate_ops emulate_ops = {
2351 .read_std = kvm_read_guest_virt,
2352 .read_emulated = emulator_read_emulated,
2353 .write_emulated = emulator_write_emulated,
2354 .cmpxchg_emulated = emulator_cmpxchg_emulated,
2355};
2356
2357static void cache_all_regs(struct kvm_vcpu *vcpu)
2358{
2359 kvm_register_read(vcpu, VCPU_REGS_RAX);
2360 kvm_register_read(vcpu, VCPU_REGS_RSP);
2361 kvm_register_read(vcpu, VCPU_REGS_RIP);
2362 vcpu->arch.regs_dirty = ~0;
2363}
2364
2365int emulate_instruction(struct kvm_vcpu *vcpu,
2366 struct kvm_run *run,
2367 unsigned long cr2,
2368 u16 error_code,
2369 int emulation_type)
2370{
2371 int r;
2372 struct decode_cache *c;
2373
2374 kvm_clear_exception_queue(vcpu);
2375 vcpu->arch.mmio_fault_cr2 = cr2;
2376 /*
2377 * TODO: fix x86_emulate.c to use guest_read/write_register
2378 * instead of direct ->regs accesses; this can save a hundred
2379 * cycles on Intel for instructions that don't read/change RSP,
2380 * for example.
2381 */
2382 cache_all_regs(vcpu);
2383
2384 vcpu->mmio_is_write = 0;
2385 vcpu->arch.pio.string = 0;
2386
2387 if (!(emulation_type & EMULTYPE_NO_DECODE)) {
2388 int cs_db, cs_l;
2389 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
2390
2391 vcpu->arch.emulate_ctxt.vcpu = vcpu;
2392 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
2393 vcpu->arch.emulate_ctxt.mode =
2394 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
2395 ? X86EMUL_MODE_REAL : cs_l
2396 ? X86EMUL_MODE_PROT64 : cs_db
2397 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
2398
2399 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
2400
2401 /* Reject instructions other than VMCALL/VMMCALL when
2402 * trying to emulate an invalid opcode */
2403 c = &vcpu->arch.emulate_ctxt.decode;
2404 if ((emulation_type & EMULTYPE_TRAP_UD) &&
2405 (!(c->twobyte && c->b == 0x01 &&
2406 (c->modrm_reg == 0 || c->modrm_reg == 3) &&
2407 c->modrm_mod == 3 && c->modrm_rm == 1)))
2408 return EMULATE_FAIL;
2409
2410 ++vcpu->stat.insn_emulation;
2411 if (r) {
2412 ++vcpu->stat.insn_emulation_fail;
2413 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2414 return EMULATE_DONE;
2415 return EMULATE_FAIL;
2416 }
2417 }
2418
2419 if (emulation_type & EMULTYPE_SKIP) {
2420 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
2421 return EMULATE_DONE;
2422 }
2423
2424 r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
2425
2426 if (vcpu->arch.pio.string)
2427 return EMULATE_DO_MMIO;
2428
2429 if ((r || vcpu->mmio_is_write) && run) {
2430 run->exit_reason = KVM_EXIT_MMIO;
2431 run->mmio.phys_addr = vcpu->mmio_phys_addr;
2432 memcpy(run->mmio.data, vcpu->mmio_data, 8);
2433 run->mmio.len = vcpu->mmio_size;
2434 run->mmio.is_write = vcpu->mmio_is_write;
2435 }
2436
2437 if (r) {
2438 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2439 return EMULATE_DONE;
2440 if (!vcpu->mmio_needed) {
2441 kvm_report_emulation_failure(vcpu, "mmio");
2442 return EMULATE_FAIL;
2443 }
2444 return EMULATE_DO_MMIO;
2445 }
2446
2447 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
2448
2449 if (vcpu->mmio_is_write) {
2450 vcpu->mmio_needed = 0;
2451 return EMULATE_DO_MMIO;
2452 }
2453
2454 return EMULATE_DONE;
2455}
2456EXPORT_SYMBOL_GPL(emulate_instruction);
2457
2458static int pio_copy_data(struct kvm_vcpu *vcpu)
2459{
2460 void *p = vcpu->arch.pio_data;
2461 gva_t q = vcpu->arch.pio.guest_gva;
2462 unsigned bytes;
2463 int ret;
2464
2465 bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
2466 if (vcpu->arch.pio.in)
2467 ret = kvm_write_guest_virt(q, p, bytes, vcpu);
2468 else
2469 ret = kvm_read_guest_virt(q, p, bytes, vcpu);
2470 return ret;
2471}
2472
2473int complete_pio(struct kvm_vcpu *vcpu)
2474{
2475 struct kvm_pio_request *io = &vcpu->arch.pio;
2476 long delta;
2477 int r;
2478 unsigned long val;
2479
2480 if (!io->string) {
2481 if (io->in) {
2482 val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2483 memcpy(&val, vcpu->arch.pio_data, io->size);
2484 kvm_register_write(vcpu, VCPU_REGS_RAX, val);
2485 }
2486 } else {
2487 if (io->in) {
2488 r = pio_copy_data(vcpu);
2489 if (r)
2490 return r;
2491 }
2492
2493 delta = 1;
2494 if (io->rep) {
2495 delta *= io->cur_count;
2496 /*
2497 * The size of the register should really depend on
2498 * current address size.
2499 */
2500 val = kvm_register_read(vcpu, VCPU_REGS_RCX);
2501 val -= delta;
2502 kvm_register_write(vcpu, VCPU_REGS_RCX, val);
2503 }
2504 if (io->down)
2505 delta = -delta;
2506 delta *= io->size;
2507 if (io->in) {
2508 val = kvm_register_read(vcpu, VCPU_REGS_RDI);
2509 val += delta;
2510 kvm_register_write(vcpu, VCPU_REGS_RDI, val);
2511 } else {
2512 val = kvm_register_read(vcpu, VCPU_REGS_RSI);
2513 val += delta;
2514 kvm_register_write(vcpu, VCPU_REGS_RSI, val);
2515 }
2516 }
2517
2518 io->count -= io->cur_count;
2519 io->cur_count = 0;
2520
2521 return 0;
2522}
2523
2524static void kernel_pio(struct kvm_io_device *pio_dev,
2525 struct kvm_vcpu *vcpu,
2526 void *pd)
2527{
2528 /* TODO: String I/O for in kernel device */
2529
2530 mutex_lock(&vcpu->kvm->lock);
2531 if (vcpu->arch.pio.in)
2532 kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
2533 vcpu->arch.pio.size,
2534 pd);
2535 else
2536 kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
2537 vcpu->arch.pio.size,
2538 pd);
2539 mutex_unlock(&vcpu->kvm->lock);
2540}
2541
2542static void pio_string_write(struct kvm_io_device *pio_dev,
2543 struct kvm_vcpu *vcpu)
2544{
2545 struct kvm_pio_request *io = &vcpu->arch.pio;
2546 void *pd = vcpu->arch.pio_data;
2547 int i;
2548
2549 mutex_lock(&vcpu->kvm->lock);
2550 for (i = 0; i < io->cur_count; i++) {
2551 kvm_iodevice_write(pio_dev, io->port,
2552 io->size,
2553 pd);
2554 pd += io->size;
2555 }
2556 mutex_unlock(&vcpu->kvm->lock);
2557}
2558
2559static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
2560 gpa_t addr, int len,
2561 int is_write)
2562{
2563 return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr, len, is_write);
2564}
2565
2566int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2567 int size, unsigned port)
2568{
2569 struct kvm_io_device *pio_dev;
2570 unsigned long val;
2571
2572 vcpu->run->exit_reason = KVM_EXIT_IO;
2573 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
2574 vcpu->run->io.size = vcpu->arch.pio.size = size;
2575 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
2576 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
2577 vcpu->run->io.port = vcpu->arch.pio.port = port;
2578 vcpu->arch.pio.in = in;
2579 vcpu->arch.pio.string = 0;
2580 vcpu->arch.pio.down = 0;
2581 vcpu->arch.pio.rep = 0;
2582
2583 if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2584 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2585 handler);
2586 else
2587 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2588 handler);
2589
2590 val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2591 memcpy(vcpu->arch.pio_data, &val, 4);
2592
2593 pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in);
2594 if (pio_dev) {
2595 kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
2596 complete_pio(vcpu);
2597 return 1;
2598 }
2599 return 0;
2600}
2601EXPORT_SYMBOL_GPL(kvm_emulate_pio);
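/*
 * Illustrative userspace sketch (not part of the original file): the
 * kvm_run fields filled in above (exit_reason, io.direction, io.size,
 * io.port, io.count, io.data_offset) are what the VMM consumes when
 * KVM_RUN returns. run is an assumed pointer to a vcpu's mmap'ed
 * kvm_run region; the port number is an arbitrary example.
 */
#include <stdio.h>
#include <linux/kvm.h>

static void handle_io_exit(struct kvm_run *run)
{
	/* the PIO payload lives inside the same mapping, at io.data_offset */
	unsigned char *data = (unsigned char *)run + run->io.data_offset;

	if (run->exit_reason == KVM_EXIT_IO &&
	    run->io.direction == KVM_EXIT_IO_OUT &&
	    run->io.port == 0x3f8)	/* toy serial console */
		fwrite(data, run->io.size, run->io.count, stdout);
}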
2602
2603int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2604 int size, unsigned long count, int down,
2605 gva_t address, int rep, unsigned port)
2606{
2607 unsigned now, in_page;
2608 int ret = 0;
2609 struct kvm_io_device *pio_dev;
2610
2611 vcpu->run->exit_reason = KVM_EXIT_IO;
2612 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
2613 vcpu->run->io.size = vcpu->arch.pio.size = size;
2614 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
2615 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
2616 vcpu->run->io.port = vcpu->arch.pio.port = port;
2617 vcpu->arch.pio.in = in;
2618 vcpu->arch.pio.string = 1;
2619 vcpu->arch.pio.down = down;
2620 vcpu->arch.pio.rep = rep;
2621
2622 if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2623 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2624 handler);
2625 else
2626 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2627 handler);
2628
2629 if (!count) {
2630 kvm_x86_ops->skip_emulated_instruction(vcpu);
2631 return 1;
2632 }
2633
2634 if (!down)
2635 in_page = PAGE_SIZE - offset_in_page(address);
2636 else
2637 in_page = offset_in_page(address) + size;
2638 now = min(count, (unsigned long)in_page / size);
2639 if (!now)
2640 now = 1;
2641 if (down) {
2642 /*
2643 * String I/O in reverse. Yuck. Kill the guest, fix later.
2644 */
2645 pr_unimpl(vcpu, "guest string pio down\n");
2646 kvm_inject_gp(vcpu, 0);
2647 return 1;
2648 }
2649 vcpu->run->io.count = now;
2650 vcpu->arch.pio.cur_count = now;
2651
2652 if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
2653 kvm_x86_ops->skip_emulated_instruction(vcpu);
2654
2655 vcpu->arch.pio.guest_gva = address;
2656
2657 pio_dev = vcpu_find_pio_dev(vcpu, port,
2658 vcpu->arch.pio.cur_count,
2659 !vcpu->arch.pio.in);
2660 if (!vcpu->arch.pio.in) {
2661 /* string PIO write */
2662 ret = pio_copy_data(vcpu);
2663 if (ret == X86EMUL_PROPAGATE_FAULT) {
2664 kvm_inject_gp(vcpu, 0);
2665 return 1;
2666 }
2667 if (ret == 0 && pio_dev) {
2668 pio_string_write(pio_dev, vcpu);
2669 complete_pio(vcpu);
2670 if (vcpu->arch.pio.count == 0)
2671 ret = 1;
2672 }
2673 } else if (pio_dev)
2674 pr_unimpl(vcpu, "no string pio read support yet, "
2675 "port %x size %d count %ld\n",
2676 port, size, count);
2677
2678 return ret;
2679}
2680EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
2681
2682static void bounce_off(void *info)
2683{
2684 /* nothing */
2685}
2686
2687static unsigned int ref_freq;
2688static unsigned long tsc_khz_ref;
2689
2690static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
2691 void *data)
2692{
2693 struct cpufreq_freqs *freq = data;
2694 struct kvm *kvm;
2695 struct kvm_vcpu *vcpu;
2696 int i, send_ipi = 0;
2697
2698 if (!ref_freq)
2699 ref_freq = freq->old;
2700
2701 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
2702 return 0;
2703 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
2704 return 0;
2705 per_cpu(cpu_tsc_khz, freq->cpu) = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
2706
2707 spin_lock(&kvm_lock);
2708 list_for_each_entry(kvm, &vm_list, vm_list) {
2709 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
2710 vcpu = kvm->vcpus[i];
2711 if (!vcpu)
2712 continue;
2713 if (vcpu->cpu != freq->cpu)
2714 continue;
2715 if (!kvm_request_guest_time_update(vcpu))
2716 continue;
2717 if (vcpu->cpu != smp_processor_id())
2718 send_ipi++;
2719 }
2720 }
2721 spin_unlock(&kvm_lock);
2722
2723 if (freq->old < freq->new && send_ipi) {
2724 /*
2725 * We are scaling the frequency up. We must make sure the
2726 * guest doesn't see old kvmclock values while running with
2727 * the new frequency; otherwise we risk the guest seeing
2728 * time go backwards.
2729 *
2730 * In case we update the frequency for another cpu
2731 * (which might be in guest context) send an interrupt
2732 * to kick the cpu out of guest context. Next time
2733 * guest context is entered kvmclock will be updated,
2734 * so the guest will not see stale values.
2735 */
2736 smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
2737 }
2738 return 0;
2739}
2740
2741static struct notifier_block kvmclock_cpufreq_notifier_block = {
2742 .notifier_call = kvmclock_cpufreq_notifier
2743};
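/*
 * Illustrative sketch (not part of the original file): cpufreq_scale()
 * above rescales the reference TSC rate linearly with the frequency
 * change, i.e. new_tsc_khz = tsc_khz_ref * freq->new / ref_freq; a
 * 2000000 kHz reference dropping to half its original frequency yields
 * 1000000 kHz. An equivalent open-coded form, using a 64-bit
 * intermediate to avoid overflow:
 */
static unsigned long scale_tsc_khz(unsigned long ref_tsc_khz,
				   unsigned int ref_khz, unsigned int new_khz)
{
	return div_u64((u64)ref_tsc_khz * new_khz, ref_khz);
}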
2744
2745int kvm_arch_init(void *opaque)
2746{
2747 int r, cpu;
2748 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
2749
2750 if (kvm_x86_ops) {
2751 printk(KERN_ERR "kvm: already loaded the other module\n");
2752 r = -EEXIST;
2753 goto out;
2754 }
2755
2756 if (!ops->cpu_has_kvm_support()) {
2757 printk(KERN_ERR "kvm: no hardware support\n");
2758 r = -EOPNOTSUPP;
2759 goto out;
2760 }
2761 if (ops->disabled_by_bios()) {
2762 printk(KERN_ERR "kvm: disabled by bios\n");
2763 r = -EOPNOTSUPP;
2764 goto out;
2765 }
2766
2767 r = kvm_mmu_module_init();
2768 if (r)
2769 goto out;
2770
2771 kvm_init_msr_list();
2772
2773 kvm_x86_ops = ops;
2774 kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
2775 kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
2776 kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
2777 PT_DIRTY_MASK, PT64_NX_MASK, 0);
2778
2779 for_each_possible_cpu(cpu)
2780 per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
2781 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
2782 tsc_khz_ref = tsc_khz;
2783 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
2784 CPUFREQ_TRANSITION_NOTIFIER);
2785 }
2786
2787 return 0;
2788
2789out:
2790 return r;
2791}
2792
2793void kvm_arch_exit(void)
2794{
2795 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
2796 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
2797 CPUFREQ_TRANSITION_NOTIFIER);
2798 kvm_x86_ops = NULL;
2799 kvm_mmu_module_exit();
2800}
2801
2802int kvm_emulate_halt(struct kvm_vcpu *vcpu)
2803{
2804 ++vcpu->stat.halt_exits;
2805 KVMTRACE_0D(HLT, vcpu, handler);
2806 if (irqchip_in_kernel(vcpu->kvm)) {
2807 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
2808 return 1;
2809 } else {
2810 vcpu->run->exit_reason = KVM_EXIT_HLT;
2811 return 0;
2812 }
2813}
2814EXPORT_SYMBOL_GPL(kvm_emulate_halt);
2815
2816static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
2817 unsigned long a1)
2818{
2819 if (is_long_mode(vcpu))
2820 return a0;
2821 else
2822 return a0 | ((gpa_t)a1 << 32);
2823}
2824
2825int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2826{
2827 unsigned long nr, a0, a1, a2, a3, ret;
2828 int r = 1;
2829
2830 nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
2831 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
2832 a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
2833 a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
2834 a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
2835
2836 KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
2837
2838 if (!is_long_mode(vcpu)) {
2839 nr &= 0xFFFFFFFF;
2840 a0 &= 0xFFFFFFFF;
2841 a1 &= 0xFFFFFFFF;
2842 a2 &= 0xFFFFFFFF;
2843 a3 &= 0xFFFFFFFF;
2844 }
2845
2846 switch (nr) {
2847 case KVM_HC_VAPIC_POLL_IRQ:
2848 ret = 0;
2849 break;
2850 case KVM_HC_MMU_OP:
2851 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
2852 break;
2853 default:
2854 ret = -KVM_ENOSYS;
2855 break;
2856 }
2857 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
2858 ++vcpu->stat.hypercalls;
2859 return r;
2860}
2861EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
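/*
 * Illustrative guest-side sketch (not part of the original file): the
 * register convention decoded above is nr in RAX, arguments in RBX, RCX,
 * RDX and RSI, with the return value coming back in RAX. A minimal
 * two-argument wrapper a guest might use (vmcall is the Intel encoding;
 * the vendor-appropriate instruction is patched in via
 * kvm_fix_hypercall() below):
 */
static inline unsigned long guest_hypercall2(unsigned long nr,
					     unsigned long p1,
					     unsigned long p2)
{
	unsigned long ret;

	asm volatile("vmcall"
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2)
		     : "memory");
	return ret;
}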
2862
2863int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
2864{
2865 char instruction[3];
2866 int ret = 0;
2867 unsigned long rip = kvm_rip_read(vcpu);
2868
2869
2870 /*
2871 * Blow out the MMU so that no other VCPU has an active mapping,
2872 * ensuring that the updated hypercall appears atomically across all
2873 * VCPUs.
2874 */
2875 kvm_mmu_zap_all(vcpu->kvm);
2876
2877 kvm_x86_ops->patch_hypercall(vcpu, instruction);
2878 if (emulator_write_emulated(rip, instruction, 3, vcpu)
2879 != X86EMUL_CONTINUE)
2880 ret = -EFAULT;
2881
2882 return ret;
2883}
2884
2885static u64 mk_cr_64(u64 curr_cr, u32 new_val)
2886{
2887 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
2888}
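/*
 * Illustrative note (not part of the original file): mk_cr_64() keeps
 * the upper 32 bits of the current control register value and replaces
 * the lower 32, since real-mode code can only supply a 32-bit value.
 * E.g. mk_cr_64(0x1122334455667788ULL, 0xdeadbeef) == 0x11223344deadbeef.
 */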
2889
2890void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2891{
2892 struct descriptor_table dt = { limit, base };
2893
2894 kvm_x86_ops->set_gdt(vcpu, &dt);
2895}
2896
2897void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2898{
2899 struct descriptor_table dt = { limit, base };
2900
2901 kvm_x86_ops->set_idt(vcpu, &dt);
2902}
2903
2904void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
2905 unsigned long *rflags)
2906{
2907 kvm_lmsw(vcpu, msw);
2908 *rflags = kvm_x86_ops->get_rflags(vcpu);
2909}
2910
2911unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
2912{
2913 unsigned long value;
2914
2915 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2916 switch (cr) {
2917 case 0:
2918 value = vcpu->arch.cr0;
2919 break;
2920 case 2:
2921 value = vcpu->arch.cr2;
2922 break;
2923 case 3:
2924 value = vcpu->arch.cr3;
2925 break;
2926 case 4:
2927 value = vcpu->arch.cr4;
2928 break;
2929 case 8:
2930 value = kvm_get_cr8(vcpu);
2931 break;
2932 default:
2933 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
2934 return 0;
2935 }
2936 KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
2937 (u32)((u64)value >> 32), handler);
2938
2939 return value;
2940}
2941
2942void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
2943 unsigned long *rflags)
2944{
2945 KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
2946 (u32)((u64)val >> 32), handler);
2947
2948 switch (cr) {
2949 case 0:
2950 kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
2951 *rflags = kvm_x86_ops->get_rflags(vcpu);
2952 break;
2953 case 2:
2954 vcpu->arch.cr2 = val;
2955 break;
2956 case 3:
2957 kvm_set_cr3(vcpu, val);
2958 break;
2959 case 4:
2960 kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
2961 break;
2962 case 8:
2963 kvm_set_cr8(vcpu, val & 0xfUL);
2964 break;
2965 default:
2966 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
2967 }
2968}
2969
2970static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
2971{
2972 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
2973 int j, nent = vcpu->arch.cpuid_nent;
2974
2975 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
2976 /* when no next entry is found, the current entry[i] is reselected */
2977 for (j = i + 1; ; j = (j + 1) % nent) {
2978 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
2979 if (ej->function == e->function) {
2980 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
2981 return j;
2982 }
2983 }
2984 return 0; /* silence gcc, even though control never reaches here */
2985}
2986
2987/* find an entry with matching function, matching index (if needed), and that
2988 * should be read next (if it's stateful) */
2989static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
2990 u32 function, u32 index)
2991{
2992 if (e->function != function)
2993 return 0;
2994 if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
2995 return 0;
2996 if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
2997 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
2998 return 0;
2999 return 1;
3000}
3001
3002struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
3003 u32 function, u32 index)
3004{
3005 int i;
3006 struct kvm_cpuid_entry2 *best = NULL;
3007
3008 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
3009 struct kvm_cpuid_entry2 *e;
3010
3011 e = &vcpu->arch.cpuid_entries[i];
3012 if (is_matching_cpuid_entry(e, function, index)) {
3013 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
3014 move_to_next_stateful_cpuid_entry(vcpu, i);
3015 best = e;
3016 break;
3017 }
3018 /*
3019 * Are both leaves basic, or both extended?
3020 */
3021 if (((e->function ^ function) & 0x80000000) == 0)
3022 if (!best || e->function > best->function)
3023 best = e;
3024 }
3025 return best;
3026}
3027
3028int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
3029{
3030 struct kvm_cpuid_entry2 *best;
3031
3032 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
3033 if (best)
3034 return best->eax & 0xff;
3035 return 36;
3036}
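/*
 * Illustrative sketch (not part of the original file): MAXPHYADDR as
 * returned above (CPUID 0x80000008 EAX[7:0], defaulting to 36) is
 * typically used to derive the mask of valid physical-address bits:
 */
static inline u64 valid_physaddr_mask(struct kvm_vcpu *vcpu)
{
	/* e.g. maxphyaddr == 36 gives 0x0000000fffffffff */
	return (1ULL << cpuid_maxphyaddr(vcpu)) - 1;
}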
3037
3038void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
3039{
3040 u32 function, index;
3041 struct kvm_cpuid_entry2 *best;
3042
3043 function = kvm_register_read(vcpu, VCPU_REGS_RAX);
3044 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
3045 kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
3046 kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
3047 kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
3048 kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
3049 best = kvm_find_cpuid_entry(vcpu, function, index);
3050 if (best) {
3051 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
3052 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
3053 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
3054 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
3055 }
3056 kvm_x86_ops->skip_emulated_instruction(vcpu);
3057 KVMTRACE_5D(CPUID, vcpu, function,
3058 (u32)kvm_register_read(vcpu, VCPU_REGS_RAX),
3059 (u32)kvm_register_read(vcpu, VCPU_REGS_RBX),
3060 (u32)kvm_register_read(vcpu, VCPU_REGS_RCX),
3061 (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler);
3062}
3063EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
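/*
 * Illustrative guest-side sketch (not part of the original file): the
 * register flow emulated above mirrors the hardware CPUID instruction --
 * leaf in EAX, subleaf in ECX, results returned in EAX/EBX/ECX/EDX:
 */
static inline void guest_cpuid(u32 leaf, u32 subleaf,
			       u32 *a, u32 *b, u32 *c, u32 *d)
{
	asm volatile("cpuid"
		     : "=a"(*a), "=b"(*b), "=c"(*c), "=d"(*d)
		     : "0"(leaf), "2"(subleaf));
}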
3064
3065/*
3066 * Check if userspace requested an interrupt window, and that the
3067 * interrupt window is open.
3068 *
3069 * No need to exit to userspace if we already have an interrupt queued.
3070 */
3071static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
3072 struct kvm_run *kvm_run)
3073{
3074 return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
3075 kvm_run->request_interrupt_window &&
3076 kvm_arch_interrupt_allowed(vcpu));
3077}
3078
3079static void post_kvm_run_save(struct kvm_vcpu *vcpu,
3080 struct kvm_run *kvm_run)
3081{
3082 kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
3083 kvm_run->cr8 = kvm_get_cr8(vcpu);
3084 kvm_run->apic_base = kvm_get_apic_base(vcpu);
3085 if (irqchip_in_kernel(vcpu->kvm))
3086 kvm_run->ready_for_interrupt_injection = 1;
3087 else
3088 kvm_run->ready_for_interrupt_injection =
3089 (kvm_arch_interrupt_allowed(vcpu) &&
3090 !kvm_cpu_has_interrupt(vcpu));
3091}
3092
3093static void vapic_enter(struct kvm_vcpu *vcpu)
3094{
3095 struct kvm_lapic *apic = vcpu->arch.apic;
3096 struct page *page;
3097
3098 if (!apic || !apic->vapic_addr)
3099 return;
3100
3101 page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
3102
3103 vcpu->arch.apic->vapic_page = page;
3104}
3105
3106static void vapic_exit(struct kvm_vcpu *vcpu)
3107{
3108 struct kvm_lapic *apic = vcpu->arch.apic;
3109
3110 if (!apic || !apic->vapic_addr)
3111 return;
3112
3113 down_read(&vcpu->kvm->slots_lock);
3114 kvm_release_page_dirty(apic->vapic_page);
3115 mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
3116 up_read(&vcpu->kvm->slots_lock);
3117}
3118
3119static void update_cr8_intercept(struct kvm_vcpu *vcpu)
3120{
3121 int max_irr, tpr;
3122
3123 if (!kvm_x86_ops->update_cr8_intercept)
3124 return;
3125
3126 max_irr = kvm_lapic_find_highest_irr(vcpu);
3127
3128 if (max_irr != -1)
3129 max_irr >>= 4;
3130
3131 tpr = kvm_lapic_get_cr8(vcpu);
3132
3133 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
3134}
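/*
 * Illustrative note (not part of the original file): "max_irr >>= 4"
 * above converts an interrupt vector (0..255) into its 4-bit priority
 * class, the granularity the TPR works at. E.g. vector 0x31 falls in
 * class 3 and is blocked while the TPR holds class 3 or higher.
 */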
3135
3136static void inject_irq(struct kvm_vcpu *vcpu)
3137{
3138 /* try to reinject previous events if any */
3139 if (vcpu->arch.nmi_injected) {
3140 kvm_x86_ops->set_nmi(vcpu);
3141 return;
3142 }
3143
3144 if (vcpu->arch.interrupt.pending) {
3145 kvm_x86_ops->set_irq(vcpu, vcpu->arch.interrupt.nr);
3146 return;
3147 }
3148
3149 /* try to inject new event if pending */
3150 if (vcpu->arch.nmi_pending) {
3151 if (kvm_x86_ops->nmi_allowed(vcpu)) {
3152 vcpu->arch.nmi_pending = false;
3153 vcpu->arch.nmi_injected = true;
3154 kvm_x86_ops->set_nmi(vcpu);
3155 }
3156 } else if (kvm_cpu_has_interrupt(vcpu)) {
3157 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
3158 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
3159 kvm_x86_ops->set_irq(vcpu, vcpu->arch.interrupt.nr);
3160 }
3161 }
3162}
3163
3164static void inject_pending_irq(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3165{
3166 bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
3167 kvm_run->request_interrupt_window;
3168
3169 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
3170 kvm_x86_ops->drop_interrupt_shadow(vcpu);
3171
3172 inject_irq(vcpu);
3173
3174 /* enable NMI/IRQ window open exits if needed */
3175 if (vcpu->arch.nmi_pending)
3176 kvm_x86_ops->enable_nmi_window(vcpu);
3177 else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
3178 kvm_x86_ops->enable_irq_window(vcpu);
3179}
3180
3181static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3182{
3183 int r;
3184
3185 if (vcpu->requests)
3186 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
3187 kvm_mmu_unload(vcpu);
3188
3189 r = kvm_mmu_reload(vcpu);
3190 if (unlikely(r))
3191 goto out;
3192
3193 if (vcpu->requests) {
3194 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
3195 __kvm_migrate_timers(vcpu);
3196 if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
3197 kvm_write_guest_time(vcpu);
3198 if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
3199 kvm_mmu_sync_roots(vcpu);
3200 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
3201 kvm_x86_ops->tlb_flush(vcpu);
3202 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
3203 &vcpu->requests)) {
3204 kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
3205 r = 0;
3206 goto out;
3207 }
3208 if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
3209 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
3210 r = 0;
3211 goto out;
3212 }
3213 }
3214
3215 preempt_disable();
3216
3217 kvm_x86_ops->prepare_guest_switch(vcpu);
3218 kvm_load_guest_fpu(vcpu);
3219
3220 local_irq_disable();
3221
3222 if (vcpu->requests || need_resched() || signal_pending(current)) {
3223 local_irq_enable();
3224 preempt_enable();
3225 r = 1;
3226 goto out;
3227 }
3228
3229 vcpu->guest_mode = 1;
3230 /*
3231 * Make sure that guest_mode assignment won't happen after
3232 * testing the pending IRQ vector bitmap.
3233 */
3234 smp_wmb();
3235
3236 if (vcpu->arch.exception.pending)
3237 __queue_exception(vcpu);
3238 else
3239 inject_pending_irq(vcpu, kvm_run);
3240
3241 if (kvm_lapic_enabled(vcpu)) {
3242 if (!vcpu->arch.apic->vapic_addr)
3243 update_cr8_intercept(vcpu);
3244 else
3245 kvm_lapic_sync_to_vapic(vcpu);
3246 }
3247
3248 up_read(&vcpu->kvm->slots_lock);
3249
3250 kvm_guest_enter();
3251
3252 get_debugreg(vcpu->arch.host_dr6, 6);
3253 get_debugreg(vcpu->arch.host_dr7, 7);
3254 if (unlikely(vcpu->arch.switch_db_regs)) {
3255 get_debugreg(vcpu->arch.host_db[0], 0);
3256 get_debugreg(vcpu->arch.host_db[1], 1);
3257 get_debugreg(vcpu->arch.host_db[2], 2);
3258 get_debugreg(vcpu->arch.host_db[3], 3);
3259
3260 set_debugreg(0, 7);
3261 set_debugreg(vcpu->arch.eff_db[0], 0);
3262 set_debugreg(vcpu->arch.eff_db[1], 1);
3263 set_debugreg(vcpu->arch.eff_db[2], 2);
3264 set_debugreg(vcpu->arch.eff_db[3], 3);
3265 }
3266
3267 KVMTRACE_0D(VMENTRY, vcpu, entryexit);
3268 kvm_x86_ops->run(vcpu, kvm_run);
3269
3270 if (unlikely(vcpu->arch.switch_db_regs)) {
3271 set_debugreg(0, 7);
3272 set_debugreg(vcpu->arch.host_db[0], 0);
3273 set_debugreg(vcpu->arch.host_db[1], 1);
3274 set_debugreg(vcpu->arch.host_db[2], 2);
3275 set_debugreg(vcpu->arch.host_db[3], 3);
3276 }
3277 set_debugreg(vcpu->arch.host_dr6, 6);
3278 set_debugreg(vcpu->arch.host_dr7, 7);
3279
3280 vcpu->guest_mode = 0;
3281 local_irq_enable();
3282
3283 ++vcpu->stat.exits;
3284
3285 /*
3286 * We must have an instruction between local_irq_enable() and
3287 * kvm_guest_exit(), so the timer interrupt isn't delayed by
3288 * the interrupt shadow. The stat.exits increment will do nicely.
3289 * But we need to prevent reordering, hence this barrier():
3290 */
3291 barrier();
3292
3293 kvm_guest_exit();
3294
3295 preempt_enable();
3296
3297 down_read(&vcpu->kvm->slots_lock);
3298
3299 /*
3300 * Profile KVM exit RIPs:
3301 */
3302 if (unlikely(prof_on == KVM_PROFILING)) {
3303 unsigned long rip = kvm_rip_read(vcpu);
3304 profile_hit(KVM_PROFILING, (void *)rip);
3305 }
3306
298101da 3307
3308 kvm_lapic_sync_from_vapic(vcpu);
3309
3310 r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
3311out:
3312 return r;
3313}
3314
3315
3316static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3317{
3318 int r;
3319
3320 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
3321 pr_debug("vcpu %d received sipi with vector # %x\n",
3322 vcpu->vcpu_id, vcpu->arch.sipi_vector);
3323 kvm_lapic_reset(vcpu);
3324 r = kvm_arch_vcpu_reset(vcpu);
3325 if (r)
3326 return r;
3327 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3328 }
3329
3330 down_read(&vcpu->kvm->slots_lock);
3331 vapic_enter(vcpu);
3332
3333 r = 1;
3334 while (r > 0) {
3335 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
3336 r = vcpu_enter_guest(vcpu, kvm_run);
3337 else {
3338 up_read(&vcpu->kvm->slots_lock);
3339 kvm_vcpu_block(vcpu);
3340 down_read(&vcpu->kvm->slots_lock);
3341 if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
3342 {
3343 switch(vcpu->arch.mp_state) {
3344 case KVM_MP_STATE_HALTED:
3345 vcpu->arch.mp_state =
3346 KVM_MP_STATE_RUNNABLE;
3347 case KVM_MP_STATE_RUNNABLE:
3348 break;
3349 case KVM_MP_STATE_SIPI_RECEIVED:
3350 default:
3351 r = -EINTR;
3352 break;
3353 }
3354 }
3355 }
3356
3357 if (r <= 0)
3358 break;
3359
3360 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
3361 if (kvm_cpu_has_pending_timer(vcpu))
3362 kvm_inject_pending_timer_irqs(vcpu);
3363
3364 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
3365 r = -EINTR;
3366 kvm_run->exit_reason = KVM_EXIT_INTR;
3367 ++vcpu->stat.request_irq_exits;
3368 }
3369 if (signal_pending(current)) {
3370 r = -EINTR;
3371 kvm_run->exit_reason = KVM_EXIT_INTR;
3372 ++vcpu->stat.signal_exits;
3373 }
3374 if (need_resched()) {
3375 up_read(&vcpu->kvm->slots_lock);
3376 kvm_resched(vcpu);
3377 down_read(&vcpu->kvm->slots_lock);
3378 }
3379 }
3380
3381 up_read(&vcpu->kvm->slots_lock);
3382 post_kvm_run_save(vcpu, kvm_run);
3383
3384 vapic_exit(vcpu);
3385
3386 return r;
3387}
3388
3389int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3390{
3391 int r;
3392 sigset_t sigsaved;
3393
3394 vcpu_load(vcpu);
3395
3396 if (vcpu->sigset_active)
3397 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
3398
3399 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
3400 kvm_vcpu_block(vcpu);
3401 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
3402 r = -EAGAIN;
3403 goto out;
3404 }
3405
3406 /* re-sync apic's tpr */
3407 if (!irqchip_in_kernel(vcpu->kvm))
3408 kvm_set_cr8(vcpu, kvm_run->cr8);
3409
3410 if (vcpu->arch.pio.cur_count) {
3411 r = complete_pio(vcpu);
3412 if (r)
3413 goto out;
3414 }
3415#if CONFIG_HAS_IOMEM
3416 if (vcpu->mmio_needed) {
3417 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
3418 vcpu->mmio_read_completed = 1;
3419 vcpu->mmio_needed = 0;
3420
3421 down_read(&vcpu->kvm->slots_lock);
3422 r = emulate_instruction(vcpu, kvm_run,
3423 vcpu->arch.mmio_fault_cr2, 0,
3424 EMULTYPE_NO_DECODE);
3425 up_read(&vcpu->kvm->slots_lock);
3426 if (r == EMULATE_DO_MMIO) {
3427 /*
3428 * Read-modify-write. Back to userspace.
3429 */
3430 r = 0;
3431 goto out;
3432 }
3433 }
3434#endif
3435 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
3436 kvm_register_write(vcpu, VCPU_REGS_RAX,
3437 kvm_run->hypercall.ret);
3438
3439 r = __vcpu_run(vcpu, kvm_run);
3440
3441out:
3442 if (vcpu->sigset_active)
3443 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
3444
3445 vcpu_put(vcpu);
3446 return r;
3447}
3448
3449int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3450{
3451 vcpu_load(vcpu);
3452
3453 regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
3454 regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
3455 regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
3456 regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
3457 regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
3458 regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
3459 regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
3460 regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
3461#ifdef CONFIG_X86_64
3462 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
3463 regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
3464 regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
3465 regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
3466 regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
3467 regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
3468 regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
3469 regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
3470#endif
3471
3472 regs->rip = kvm_rip_read(vcpu);
3473 regs->rflags = kvm_x86_ops->get_rflags(vcpu);
3474
3475 /*
3476 * Don't leak debug flags in case they were set for guest debugging
3477 */
3478 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
3479 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
3480
3481 vcpu_put(vcpu);
3482
3483 return 0;
3484}
3485
3486int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3487{
3488 vcpu_load(vcpu);
3489
3490 kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
3491 kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
3492 kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
3493 kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
3494 kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
3495 kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
3496 kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
3497 kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
3498#ifdef CONFIG_X86_64
3499 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
3500 kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
3501 kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
3502 kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
3503 kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
3504 kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
3505 kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
3506 kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
3507
3508#endif
3509
3510 kvm_rip_write(vcpu, regs->rip);
3511 kvm_x86_ops->set_rflags(vcpu, regs->rflags);
3512
3513
3514 vcpu->arch.exception.pending = false;
3515
3516 vcpu_put(vcpu);
3517
3518 return 0;
3519}
3520
3521void kvm_get_segment(struct kvm_vcpu *vcpu,
3522 struct kvm_segment *var, int seg)
3523{
3524 kvm_x86_ops->get_segment(vcpu, var, seg);
3525}
3526
3527void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3528{
3529 struct kvm_segment cs;
3530
3531 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
3532 *db = cs.db;
3533 *l = cs.l;
3534}
3535EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
3536
3537int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3538 struct kvm_sregs *sregs)
3539{
3540 struct descriptor_table dt;
3541
3542 vcpu_load(vcpu);
3543
3544 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
3545 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
3546 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
3547 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
3548 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
3549 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
3550
3551 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
3552 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
3553
3554 kvm_x86_ops->get_idt(vcpu, &dt);
3555 sregs->idt.limit = dt.limit;
3556 sregs->idt.base = dt.base;
3557 kvm_x86_ops->get_gdt(vcpu, &dt);
3558 sregs->gdt.limit = dt.limit;
3559 sregs->gdt.base = dt.base;
3560
3561 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
3562 sregs->cr0 = vcpu->arch.cr0;
3563 sregs->cr2 = vcpu->arch.cr2;
3564 sregs->cr3 = vcpu->arch.cr3;
3565 sregs->cr4 = vcpu->arch.cr4;
3566 sregs->cr8 = kvm_get_cr8(vcpu);
3567 sregs->efer = vcpu->arch.shadow_efer;
3568 sregs->apic_base = kvm_get_apic_base(vcpu);
3569
3570 if (irqchip_in_kernel(vcpu->kvm))
3571 memset(sregs->interrupt_bitmap, 0,
3572 sizeof sregs->interrupt_bitmap);
3573 else
3574 memcpy(sregs->interrupt_bitmap, vcpu->arch.irq_pending,
3575 sizeof sregs->interrupt_bitmap);
3576
3577 if (vcpu->arch.interrupt.pending)
3578 set_bit(vcpu->arch.interrupt.nr,
3579 (unsigned long *)sregs->interrupt_bitmap);
3580
3581 vcpu_put(vcpu);
3582
3583 return 0;
3584}
3585
3586int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3587 struct kvm_mp_state *mp_state)
3588{
3589 vcpu_load(vcpu);
3590 mp_state->mp_state = vcpu->arch.mp_state;
3591 vcpu_put(vcpu);
3592 return 0;
3593}
3594
3595int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3596 struct kvm_mp_state *mp_state)
3597{
3598 vcpu_load(vcpu);
3599 vcpu->arch.mp_state = mp_state->mp_state;
3600 vcpu_put(vcpu);
3601 return 0;
3602}
3603
3604static void kvm_set_segment(struct kvm_vcpu *vcpu,
3605 struct kvm_segment *var, int seg)
3606{
3607 kvm_x86_ops->set_segment(vcpu, var, seg);
3608}
3609
3610static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
3611 struct kvm_segment *kvm_desct)
3612{
3613 kvm_desct->base = seg_desc->base0;
3614 kvm_desct->base |= seg_desc->base1 << 16;
3615 kvm_desct->base |= seg_desc->base2 << 24;
3616 kvm_desct->limit = seg_desc->limit0;
3617 kvm_desct->limit |= seg_desc->limit << 16;
3618 if (seg_desc->g) {
3619 kvm_desct->limit <<= 12;
3620 kvm_desct->limit |= 0xfff;
3621 }
3622 kvm_desct->selector = selector;
3623 kvm_desct->type = seg_desc->type;
3624 kvm_desct->present = seg_desc->p;
3625 kvm_desct->dpl = seg_desc->dpl;
3626 kvm_desct->db = seg_desc->d;
3627 kvm_desct->s = seg_desc->s;
3628 kvm_desct->l = seg_desc->l;
3629 kvm_desct->g = seg_desc->g;
3630 kvm_desct->avl = seg_desc->avl;
3631 if (!selector)
3632 kvm_desct->unusable = 1;
3633 else
3634 kvm_desct->unusable = 0;
3635 kvm_desct->padding = 0;
3636}
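/*
 * Illustrative note (not part of the original file): when the
 * granularity bit is set above, the 20-bit descriptor limit is counted
 * in 4K pages, so it is shifted left by 12 with the low 12 bits filled
 * in. E.g. a raw limit of 0xfffff becomes 0xffffffff (a full 4GB
 * segment), while a raw limit of 0 becomes 0xfff (a single page, since
 * the limit is inclusive).
 */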
3637
3638static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
3639 u16 selector,
3640 struct descriptor_table *dtable)
3641{
3642 if (selector & 1 << 2) {
3643 struct kvm_segment kvm_seg;
3644
3645 kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
3646
3647 if (kvm_seg.unusable)
3648 dtable->limit = 0;
3649 else
3650 dtable->limit = kvm_seg.limit;
3651 dtable->base = kvm_seg.base;
3652 }
3653 else
3654 kvm_x86_ops->get_gdt(vcpu, dtable);
3655}
3656
3657/* allowed just for 8 bytes segments */
3658static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3659 struct desc_struct *seg_desc)
3660{
3661 gpa_t gpa;
3662 struct descriptor_table dtable;
3663 u16 index = selector >> 3;
3664
3665 get_segment_descriptor_dtable(vcpu, selector, &dtable);
3666
3667 if (dtable.limit < index * 8 + 7) {
3668 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
3669 return 1;
3670 }
3671 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
3672 gpa += index * 8;
3673 return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
3674}
3675
3676/* allowed just for 8 bytes segments */
3677static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3678 struct desc_struct *seg_desc)
3679{
3680 gpa_t gpa;
3681 struct descriptor_table dtable;
3682 u16 index = selector >> 3;
3683
3684 get_segment_descriptor_dtable(vcpu, selector, &dtable);
3685
3686 if (dtable.limit < index * 8 + 7)
3687 return 1;
3688 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
3689 gpa += index * 8;
3690 return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
3691}
3692
3693static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
3694 struct desc_struct *seg_desc)
3695{
3696 u32 base_addr;
3697
3698 base_addr = seg_desc->base0;
3699 base_addr |= (seg_desc->base1 << 16);
3700 base_addr |= (seg_desc->base2 << 24);
3701
3702 return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
3703}
3704
3705static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
3706{
3707 struct kvm_segment kvm_seg;
3708
3709 kvm_get_segment(vcpu, &kvm_seg, seg);
3710 return kvm_seg.selector;
3711}
3712
3713static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
3714 u16 selector,
3715 struct kvm_segment *kvm_seg)
3716{
3717 struct desc_struct seg_desc;
3718
3719 if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
3720 return 1;
3721 seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
3722 return 0;
3723}
3724
3725static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
3726{
3727 struct kvm_segment segvar = {
3728 .base = selector << 4,
3729 .limit = 0xffff,
3730 .selector = selector,
3731 .type = 3,
3732 .present = 1,
3733 .dpl = 3,
3734 .db = 0,
3735 .s = 1,
3736 .l = 0,
3737 .g = 0,
3738 .avl = 0,
3739 .unusable = 0,
3740 };
3741 kvm_x86_ops->set_segment(vcpu, &segvar, seg);
3742 return 0;
3743}
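/*
 * Illustrative note (not part of the original file): the synthesized
 * real-mode segment above follows the classic rule base = selector << 4,
 * e.g. selector 0xb800 gives base 0xb8000 (the VGA text buffer), with a
 * fixed 64K limit and a read/write data type.
 */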
3744
3745int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3746 int type_bits, int seg)
37817f29
IE
3747{
3748 struct kvm_segment kvm_seg;
3749
f4bbd9aa
AK
3750 if (!(vcpu->arch.cr0 & X86_CR0_PE))
3751 return kvm_load_realmode_segment(vcpu, selector, seg);
37817f29
IE
3752 if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
3753 return 1;
3754 kvm_seg.type |= type_bits;
3755
3756 if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
3757 seg != VCPU_SREG_LDTR)
3758 if (!kvm_seg.s)
3759 kvm_seg.unusable = 1;
3760
3e6e0aab 3761 kvm_set_segment(vcpu, &kvm_seg, seg);
37817f29
IE
3762 return 0;
3763}
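
/*
 * Illustrative reading of type_bits (not from the original source):
 * the TSS loaders below OR it into the cached descriptor type, passing
 * 0 for LDTR, 1 (accessed) for data segments and 9 (code | accessed)
 * for the code segment, e.g.:
 *
 *	kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS);
 */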

static void save_state_to_tss32(struct kvm_vcpu *vcpu,
				struct tss_segment_32 *tss)
{
	tss->cr3 = vcpu->arch.cr3;
	tss->eip = kvm_rip_read(vcpu);
	tss->eflags = kvm_x86_ops->get_rflags(vcpu);
	tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
	tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
	tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
	tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
	tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
	tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
}

static int load_state_from_tss32(struct kvm_vcpu *vcpu,
				 struct tss_segment_32 *tss)
{
	kvm_set_cr3(vcpu, tss->cr3);

	kvm_rip_write(vcpu, tss->eip);
	kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);

	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);

	if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
		return 1;
	return 0;
}

static void save_state_to_tss16(struct kvm_vcpu *vcpu,
				struct tss_segment_16 *tss)
{
	tss->ip = kvm_rip_read(vcpu);
	tss->flag = kvm_x86_ops->get_rflags(vcpu);
	tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
	tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
	tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);

	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
	tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
}

static int load_state_from_tss16(struct kvm_vcpu *vcpu,
				 struct tss_segment_16 *tss)
{
	kvm_rip_write(vcpu, tss->ip);
	kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);

	if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
		return 1;
	return 0;
}

static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
			      u16 old_tss_sel, u32 old_tss_base,
			      struct desc_struct *nseg_desc)
{
	struct tss_segment_16 tss_segment_16;
	int ret = 0;

	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
			   sizeof tss_segment_16))
		goto out;

	save_state_to_tss16(vcpu, &tss_segment_16);

	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
			    sizeof tss_segment_16))
		goto out;

	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
			   &tss_segment_16, sizeof tss_segment_16))
		goto out;

	if (old_tss_sel != 0xffff) {
		tss_segment_16.prev_task_link = old_tss_sel;

		if (kvm_write_guest(vcpu->kvm,
				    get_tss_base_addr(vcpu, nseg_desc),
				    &tss_segment_16.prev_task_link,
				    sizeof tss_segment_16.prev_task_link))
			goto out;
	}

	if (load_state_from_tss16(vcpu, &tss_segment_16))
		goto out;

	ret = 1;
out:
	return ret;
}

static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
			      u16 old_tss_sel, u32 old_tss_base,
			      struct desc_struct *nseg_desc)
{
	struct tss_segment_32 tss_segment_32;
	int ret = 0;

	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
			   sizeof tss_segment_32))
		goto out;

	save_state_to_tss32(vcpu, &tss_segment_32);

	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
			    sizeof tss_segment_32))
		goto out;

	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
			   &tss_segment_32, sizeof tss_segment_32))
		goto out;

	if (old_tss_sel != 0xffff) {
		tss_segment_32.prev_task_link = old_tss_sel;

		if (kvm_write_guest(vcpu->kvm,
				    get_tss_base_addr(vcpu, nseg_desc),
				    &tss_segment_32.prev_task_link,
				    sizeof tss_segment_32.prev_task_link))
			goto out;
	}

	if (load_state_from_tss32(vcpu, &tss_segment_32))
		goto out;

	ret = 1;
out:
	return ret;
}
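
/*
 * Illustrative summary (not in the original source): both helpers
 * above follow the same sequence: read the outgoing TSS, refresh it
 * with current vcpu state, write it back, read the incoming TSS,
 * optionally patch its prev_task_link with the old TR selector, then
 * load vcpu state from it. They return 1 on success and 0 on failure,
 * which kvm_task_switch() below passes through.
 */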

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
{
	struct kvm_segment tr_seg;
	struct desc_struct cseg_desc;
	struct desc_struct nseg_desc;
	int ret = 0;
	u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
	u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);

	old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);

	/* FIXME: Handle errors. Failure to read either the TSS or its
	 * descriptors should generate a page fault.
	 */
	if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
		goto out;

	if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
		goto out;

	if (reason != TASK_SWITCH_IRET) {
		int cpl;

		cpl = kvm_x86_ops->get_cpl(vcpu);
		if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
			kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
			return 1;
		}
	}

	if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) {
		kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
		return 1;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		cseg_desc.type &= ~(1 << 1); /* clear the B flag */
		save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
	}

	if (reason == TASK_SWITCH_IRET) {
		u32 eflags = kvm_x86_ops->get_rflags(vcpu);
		kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
	}

	/* set back link to prev task only if NT bit is set in eflags;
	 * note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (nseg_desc.type & 8)
		ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_sel,
					 old_tss_base, &nseg_desc);
	else
		ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_sel,
					 old_tss_base, &nseg_desc);

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
		u32 eflags = kvm_x86_ops->get_rflags(vcpu);
		kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
	}

	if (reason != TASK_SWITCH_IRET) {
		nseg_desc.type |= (1 << 1);
		save_guest_segment_descriptor(vcpu, tss_selector,
					      &nseg_desc);
	}

	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
	seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
	tr_seg.type = 11;
	kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);
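
/*
 * Illustrative sketch (hypothetical caller, not from this file): a
 * vendor exit handler that intercepts a guest task switch would decode
 * the target selector and the switch reason (TASK_SWITCH_CALL,
 * TASK_SWITCH_IRET, TASK_SWITCH_JMP or TASK_SWITCH_GATE) and defer to
 * the helper above:
 *
 *	return kvm_task_switch(vcpu, tss_selector, reason);
 */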

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int mmu_reset_needed = 0;
	int i, pending_vec, max_bits;
	struct descriptor_table dt;

	vcpu_load(vcpu);

	dt.limit = sregs->idt.limit;
	dt.base = sregs->idt.base;
	kvm_x86_ops->set_idt(vcpu, &dt);
	dt.limit = sregs->gdt.limit;
	dt.base = sregs->gdt.base;
	kvm_x86_ops->set_gdt(vcpu, &dt);

	vcpu->arch.cr2 = sregs->cr2;
	mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;

	down_read(&vcpu->kvm->slots_lock);
	if (gfn_to_memslot(vcpu->kvm, sregs->cr3 >> PAGE_SHIFT))
		vcpu->arch.cr3 = sregs->cr3;
	else
		set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
	up_read(&vcpu->kvm->slots_lock);

	kvm_set_cr8(vcpu, sregs->cr8);

	mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
	kvm_x86_ops->set_efer(vcpu, sregs->efer);
	kvm_set_apic_base(vcpu, sregs->apic_base);

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);

	mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
	vcpu->arch.cr0 = sregs->cr0;

	mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
	if (!is_long_mode(vcpu) && is_pae(vcpu))
		load_pdptrs(vcpu, vcpu->arch.cr3);

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	if (!irqchip_in_kernel(vcpu->kvm)) {
		memcpy(vcpu->arch.irq_pending, sregs->interrupt_bitmap,
		       sizeof vcpu->arch.irq_pending);
		vcpu->arch.irq_summary = 0;
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.irq_pending); ++i)
			if (vcpu->arch.irq_pending[i])
				__set_bit(i, &vcpu->arch.irq_summary);
	} else {
		max_bits = (sizeof sregs->interrupt_bitmap) << 3;
		pending_vec = find_first_bit(
			(const unsigned long *)sregs->interrupt_bitmap,
			max_bits);
		/* Only pending external irq is handled here */
		if (pending_vec < max_bits) {
			kvm_queue_interrupt(vcpu, pending_vec);
			pr_debug("Set back pending irq %d\n", pending_vec);
		}
		kvm_pic_clear_isr_ack(vcpu->kvm);
	}

	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	/* Older userspace won't unhalt the vcpu on reset. */
	if (vcpu->vcpu_id == 0 && kvm_rip_read(vcpu) == 0xfff0 &&
	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
	    !(vcpu->arch.cr0 & X86_CR0_PE))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

	vcpu_put(vcpu);

	return 0;
}
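
/*
 * Illustrative userspace sketch (assumes a vcpu fd from
 * KVM_CREATE_VCPU): this handler backs the KVM_SET_SREGS ioctl, which
 * is normally used read-modify-write:
 *
 *	struct kvm_sregs sregs;
 *
 *	ioctl(vcpu_fd, KVM_GET_SREGS, &sregs);
 *	sregs.cr0 |= 0x1;	(set PE, for example)
 *	ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
 */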

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int i, r;

	vcpu_load(vcpu);

	if ((dbg->control & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) ==
	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) {
		for (i = 0; i < KVM_NR_DB_REGS; ++i)
			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
		vcpu->arch.switch_db_regs =
			(dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
	} else {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
		vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
	}

	r = kvm_x86_ops->set_guest_debug(vcpu, dbg);

	if (dbg->control & KVM_GUESTDBG_INJECT_DB)
		kvm_queue_exception(vcpu, DB_VECTOR);
	else if (dbg->control & KVM_GUESTDBG_INJECT_BP)
		kvm_queue_exception(vcpu, BP_VECTOR);

	vcpu_put(vcpu);

	return r;
}
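
/*
 * Illustrative userspace sketch (hypothetical values): this is reached
 * via the KVM_SET_GUEST_DEBUG vcpu ioctl; arming one hardware
 * breakpoint might look like:
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP,
 *	};
 *
 *	dbg.arch.debugreg[0] = bp_addr;	(DR0: linear address)
 *	dbg.arch.debugreg[7] = 0x1;	(DR7: local-enable DR0)
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 */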

/*
 * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
 * we have asm/x86/processor.h
 */
struct fxsave {
	u16 cwd;
	u16 swd;
	u16 twd;
	u16 fop;
	u64 rip;
	u64 rdp;
	u32 mxcsr;
	u32 mxcsr_mask;
	u32 st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
	u32 xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
#else
	u32 xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};

/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;

	vcpu_load(vcpu);
	down_read(&vcpu->kvm->slots_lock);
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
	up_read(&vcpu->kvm->slots_lock);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;
	vcpu_put(vcpu);

	return 0;
}
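
/*
 * Illustrative userspace sketch: this backs the KVM_TRANSLATE vcpu
 * ioctl:
 *
 *	struct kvm_translation tr = { .linear_address = gva };
 *
 *	ioctl(vcpu_fd, KVM_TRANSLATE, &tr);
 *	if (tr.valid)
 *		use tr.physical_address;
 */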

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

void fx_init(struct kvm_vcpu *vcpu)
{
	unsigned after_mxcsr_mask;

	/*
	 * Touch the fpu the first time in a non-atomic context: if this
	 * is the first fpu instruction, the exception handler will fire
	 * before the instruction returns, and it will have to allocate
	 * ram with GFP_KERNEL.
	 */
	if (!used_math())
		kvm_fx_save(&vcpu->arch.host_fx_image);

	/* Initialize guest FPU by resetting ours and saving into guest's */
	preempt_disable();
	kvm_fx_save(&vcpu->arch.host_fx_image);
	kvm_fx_finit();
	kvm_fx_save(&vcpu->arch.guest_fx_image);
	kvm_fx_restore(&vcpu->arch.host_fx_image);
	preempt_enable();

	vcpu->arch.cr0 |= X86_CR0_ET;
	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
	vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
	memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);

void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 1;
	kvm_fx_save(&vcpu->arch.host_fx_image);
	kvm_fx_restore(&vcpu->arch.guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	kvm_fx_save(&vcpu->arch.guest_fx_image);
	kvm_fx_restore(&vcpu->arch.host_fx_image);
	++vcpu->stat.fpu_reload;
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
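
/*
 * Illustrative note: kvm_load_guest_fpu()/kvm_put_guest_fpu() bracket
 * guest execution; the first swaps the host fx image out for the
 * guest's, the second swaps it back and bumps the fpu_reload stat,
 * and the guest_fpu_loaded guard makes repeated calls harmless.
 */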

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.time_page) {
		kvm_release_page_dirty(vcpu->arch.time_page);
		vcpu->arch.time_page = NULL;
	}

	kvm_x86_ops->vcpu_free(vcpu);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	return kvm_x86_ops->vcpu_create(kvm, id);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int r;

	/* We do fxsave: this must be aligned. */
	BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);

	vcpu->arch.mtrr_state.have_fixed = 1;
	vcpu_load(vcpu);
	r = kvm_arch_vcpu_reset(vcpu);
	if (r == 0)
		r = kvm_mmu_setup(vcpu);
	vcpu_put(vcpu);
	if (r < 0)
		goto free_vcpu;

	return 0;
free_vcpu:
	kvm_x86_ops->vcpu_free(vcpu);
	return r;
}
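
/*
 * Illustrative note: fxsave/fxrstor fault on a memory operand that is
 * not 16-byte aligned, which is what the BUG_ON above checks by
 * masking the image address with 0xF.
 */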

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

	kvm_x86_ops->vcpu_free(vcpu);
}

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = false;
	vcpu->arch.nmi_injected = false;

	vcpu->arch.switch_db_regs = 0;
	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
	vcpu->arch.dr6 = DR6_FIXED_1;
	vcpu->arch.dr7 = DR7_FIXED_1;

	return kvm_x86_ops->vcpu_reset(vcpu);
}

void kvm_arch_hardware_enable(void *garbage)
{
	kvm_x86_ops->hardware_enable(garbage);
}

void kvm_arch_hardware_disable(void *garbage)
{
	kvm_x86_ops->hardware_disable(garbage);
}

int kvm_arch_hardware_setup(void)
{
	return kvm_x86_ops->hardware_setup();
}

void kvm_arch_hardware_unsetup(void)
{
	kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
	kvm_x86_ops->check_processor_compatibility(rtn);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct page *page;
	struct kvm *kvm;
	int r;

	BUG_ON(vcpu->kvm == NULL);
	kvm = vcpu->kvm;

	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->arch.pio_data = page_address(page);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	if (irqchip_in_kernel(kvm)) {
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
	}

	return 0;

fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
	free_page((unsigned long)vcpu->arch.pio_data);
fail:
	return r;
}
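
/*
 * Illustrative note: the fail_* labels above form the usual kernel
 * unwind ladder; each label frees exactly what was allocated before
 * the failing step, in reverse order of allocation.
 */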

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_free_lapic(vcpu);
	down_read(&vcpu->kvm->slots_lock);
	kvm_mmu_destroy(vcpu);
	up_read(&vcpu->kvm->slots_lock);
	free_page((unsigned long)vcpu->arch.pio_data);
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);

	rdtscll(kvm->arch.vm_init_tsc);

	return kvm;
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	/*
	 * Unpin any mmu pages first.
	 */
	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}
4473
ad8ba2cd
SY
4474void kvm_arch_sync_events(struct kvm *kvm)
4475{
ba4cef31 4476 kvm_free_all_assigned_devices(kvm);
ad8ba2cd
SY
4477}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_iommu_unmap_guest(kvm);
	kvm_free_pit(kvm);
	kfree(kvm->arch.vpic);
	kfree(kvm->arch.vioapic);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	if (kvm->arch.apic_access_page)
		put_page(kvm->arch.apic_access_page);
	if (kvm->arch.ept_identity_pagetable)
		put_page(kvm->arch.ept_identity_pagetable);
	kfree(kvm);
}

int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc)
{
	int npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];

	/* To keep backward compatibility with older userspace,
	 * x86 needs to handle the !user_alloc case.
	 */
	if (!user_alloc) {
		if (npages && !old.rmap) {
			unsigned long userspace_addr;

			down_write(&current->mm->mmap_sem);
			userspace_addr = do_mmap(NULL, 0,
						 npages * PAGE_SIZE,
						 PROT_READ | PROT_WRITE,
						 MAP_PRIVATE | MAP_ANONYMOUS,
						 0);
			up_write(&current->mm->mmap_sem);

			if (IS_ERR((void *)userspace_addr))
				return PTR_ERR((void *)userspace_addr);

			/* set userspace_addr atomically for kvm_hva_to_rmapp */
			spin_lock(&kvm->mmu_lock);
			memslot->userspace_addr = userspace_addr;
			spin_unlock(&kvm->mmu_lock);
		} else {
			if (!old.user_alloc && old.rmap) {
				int ret;

				down_write(&current->mm->mmap_sem);
				ret = do_munmap(current->mm, old.userspace_addr,
						old.npages * PAGE_SIZE);
				up_write(&current->mm->mmap_sem);
				if (ret < 0)
					printk(KERN_WARNING
					       "kvm_vm_ioctl_set_memory_region: "
					       "failed to munmap memory\n");
			}
		}
	}

	if (!kvm->arch.n_requested_mmu_pages) {
		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
	}

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	kvm_flush_remote_tlbs(kvm);

	return 0;
}
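
/*
 * Illustrative userspace sketch (hypothetical values): slots normally
 * arrive through the KVM_SET_USER_MEMORY_REGION vm ioctl, i.e. the
 * user_alloc path above:
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = 0x100000,
 *		.userspace_addr  = (__u64)guest_ram,	(from mmap())
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */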

void kvm_arch_flush_shadow(struct kvm *kvm)
{
	kvm_mmu_zap_all(kvm);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
		|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
		|| vcpu->arch.nmi_pending;
}

static void vcpu_kick_intr(void *info)
{
#ifdef DEBUG
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
	printk(KERN_DEBUG "vcpu_kick_intr %p \n", vcpu);
#endif
}

void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int ipi_pcpu = vcpu->cpu;
	int cpu;

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		++vcpu->stat.halt_wakeup;
	}
	/*
	 * We may be called synchronously with irqs disabled in guest mode,
	 * so there is no need to call smp_call_function_single() in that
	 * case.
	 */
	cpu = get_cpu();
	if (vcpu->guest_mode && vcpu->cpu != cpu)
		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
	put_cpu();
}

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->interrupt_allowed(vcpu);
}