/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>

#include <asm/uaccess.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS						\
        (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
                          | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
                          | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
        (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE \
                          | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
                          | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
                          | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

/* EFER defaults:
 * - enable syscall per default because it's emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
                                             struct kvm_cpuid_entry2 __user *entries);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                                              u32 function, u32 index);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "pf_fixed", VCPU_STAT(pf_fixed) },
        { "pf_guest", VCPU_STAT(pf_guest) },
        { "tlb_flush", VCPU_STAT(tlb_flush) },
        { "invlpg", VCPU_STAT(invlpg) },
        { "exits", VCPU_STAT(exits) },
        { "io_exits", VCPU_STAT(io_exits) },
        { "mmio_exits", VCPU_STAT(mmio_exits) },
        { "signal_exits", VCPU_STAT(signal_exits) },
        { "irq_window", VCPU_STAT(irq_window_exits) },
        { "nmi_window", VCPU_STAT(nmi_window_exits) },
        { "halt_exits", VCPU_STAT(halt_exits) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "hypercalls", VCPU_STAT(hypercalls) },
        { "request_irq", VCPU_STAT(request_irq_exits) },
        { "irq_exits", VCPU_STAT(irq_exits) },
        { "host_state_reload", VCPU_STAT(host_state_reload) },
        { "efer_reload", VCPU_STAT(efer_reload) },
        { "fpu_reload", VCPU_STAT(fpu_reload) },
        { "insn_emulation", VCPU_STAT(insn_emulation) },
        { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
        { "irq_injections", VCPU_STAT(irq_injections) },
        { "nmi_injections", VCPU_STAT(nmi_injections) },
        { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
        { "mmu_pte_write", VM_STAT(mmu_pte_write) },
        { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
        { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
        { "mmu_flooded", VM_STAT(mmu_flooded) },
        { "mmu_recycled", VM_STAT(mmu_recycled) },
        { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
        { "mmu_unsync", VM_STAT(mmu_unsync) },
        { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
        { "largepages", VM_STAT(lpages) },
        { NULL }
};

unsigned long segment_base(u16 selector)
{
        struct descriptor_table gdt;
        struct desc_struct *d;
        unsigned long table_base;
        unsigned long v;

        if (selector == 0)
                return 0;

        asm("sgdt %0" : "=m"(gdt));
        table_base = gdt.base;

        if (selector & 4) {		/* from ldt */
                u16 ldt_selector;

                asm("sldt %0" : "=g"(ldt_selector));
                table_base = segment_base(ldt_selector);
        }
        d = (struct desc_struct *)(table_base + (selector & ~7));
        v = d->base0 | ((unsigned long)d->base1 << 16) |
                ((unsigned long)d->base2 << 24);
#ifdef CONFIG_X86_64
        if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
                v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
        return v;
}
EXPORT_SYMBOL_GPL(segment_base);

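/*
 * Worked example for segment_base() (illustration only): a selector packs
 * a 13-bit descriptor index in bits 15..3, the table indicator (TI) in
 * bit 2 (0 = GDT, 1 = LDT) and the RPL in bits 1..0.  For a hypothetical
 * selector 0x002b: 0x002b & 4 == 0 selects the GDT, and 0x002b & ~7 ==
 * 0x0028 is the byte offset of the 8-byte descriptor, i.e. GDT index 5.
 * System descriptors of type 2 (LDT) or 9/11 (available/busy 64-bit TSS)
 * are 16 bytes wide on x86_64, hence the extra base3 word merged in above.
 */
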
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
        if (irqchip_in_kernel(vcpu->kvm))
                return vcpu->arch.apic_base;
        else
                return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
        /* TODO: reserve bits check */
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_lapic_set_base(vcpu, data);
        else
                vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
        WARN_ON(vcpu->arch.exception.pending);
        vcpu->arch.exception.pending = true;
        vcpu->arch.exception.has_error_code = false;
        vcpu->arch.exception.nr = nr;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                           u32 error_code)
{
        ++vcpu->stat.pf_guest;

        if (vcpu->arch.exception.pending) {
                if (vcpu->arch.exception.nr == PF_VECTOR) {
                        printk(KERN_DEBUG "kvm: inject_page_fault:"
                               " double fault 0x%lx\n", addr);
                        vcpu->arch.exception.nr = DF_VECTOR;
                        vcpu->arch.exception.error_code = 0;
                } else if (vcpu->arch.exception.nr == DF_VECTOR) {
                        /* triple fault -> shutdown */
                        set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
                }
                return;
        }
        vcpu->arch.cr2 = addr;
        kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
        vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
        WARN_ON(vcpu->arch.exception.pending);
        vcpu->arch.exception.pending = true;
        vcpu->arch.exception.has_error_code = true;
        vcpu->arch.exception.nr = nr;
        vcpu->arch.exception.error_code = error_code;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

static void __queue_exception(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
                                     vcpu->arch.exception.has_error_code,
                                     vcpu->arch.exception.error_code);
}

/*
 * Load the PAE pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
        unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
        int i;
        int ret;
        u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

        ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
                                  offset * sizeof(u64), sizeof(pdpte));
        if (ret < 0) {
                ret = 0;
                goto out;
        }
        for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
                if (is_present_pte(pdpte[i]) &&
                    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
                        ret = 0;
                        goto out;
                }
        }
        ret = 1;

        memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
out:
        return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

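/*
 * A quick check of the offset arithmetic above (illustrative): the PAE
 * PDPT is a 32-byte-aligned block of four u64 entries located by CR3
 * bits 31..5.  "((cr3 & (PAGE_SIZE-1)) >> 5) << 2" counts in u64 units,
 * so offset * sizeof(u64) == cr3 & 0xfe0 -- e.g. cr3 = 0x12345ec0 gives
 * offset 0x1d8 and a byte offset of 0xec0 back into the page, exactly
 * the 32-byte-aligned location the hardware itself would read from.
 */
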
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
        u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
        bool changed = true;
        int r;

        if (is_long_mode(vcpu) || !is_pae(vcpu))
                return false;

        r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
        if (r < 0)
                goto out;
        changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:
        return changed;
}

void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        if (cr0 & CR0_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
                       cr0, vcpu->arch.cr0);
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
                printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
                printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
                       "and a clear PE flag\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
                if ((vcpu->arch.shadow_efer & EFER_LME)) {
                        int cs_db, cs_l;

                        if (!is_pae(vcpu)) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while PAE is disabled\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                        kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
                        if (cs_l) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while CS.L == 1\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                } else
#endif
                if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
                        printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
                               "reserved bits\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }

        }

        kvm_x86_ops->set_cr0(vcpu, cr0);
        vcpu->arch.cr0 = cr0;

        kvm_mmu_reset_context(vcpu);
        return;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
        kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
        KVMTRACE_1D(LMSW, vcpu,
                    (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
                    handler);
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        unsigned long old_cr4 = vcpu->arch.cr4;
        unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

        if (cr4 & CR4_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (is_long_mode(vcpu)) {
                if (!(cr4 & X86_CR4_PAE)) {
                        printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
                               "in long mode\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
                   && ((cr4 ^ old_cr4) & pdptr_bits)
                   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
                printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (cr4 & X86_CR4_VMXE) {
                printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
        kvm_x86_ops->set_cr4(vcpu, cr4);
        vcpu->arch.cr4 = cr4;
        vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
        kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
                kvm_mmu_sync_roots(vcpu);
                kvm_mmu_flush_tlb(vcpu);
                return;
        }

        if (is_long_mode(vcpu)) {
                if (cr3 & CR3_L_MODE_RESERVED_BITS) {
                        printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        } else {
                if (is_pae(vcpu)) {
                        if (cr3 & CR3_PAE_RESERVED_BITS) {
                                printk(KERN_DEBUG
                                       "set_cr3: #GP, reserved bits\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                        if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
                                printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
                                       "reserved bits\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                }
                /*
                 * We don't check reserved bits in nonpae mode, because
                 * this isn't enforced, and VMware depends on this.
                 */
        }

        /*
         * Does the new cr3 value map to physical memory? (Note, we
         * catch an invalid cr3 even in real-mode, because it would
         * cause trouble later on when we turn on paging anyway.)
         *
         * A real CPU would silently accept an invalid cr3 and would
         * attempt to use it - with largely undefined (and often hard
         * to debug) behavior on the guest side.
         */
        if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
                kvm_inject_gp(vcpu, 0);
        else {
                vcpu->arch.cr3 = cr3;
                vcpu->arch.mmu.new_cr3(vcpu);
        }
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
        if (cr8 & CR8_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
                kvm_inject_gp(vcpu, 0);
                return;
        }
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_lapic_set_tpr(vcpu, cr8);
        else
                vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
        if (irqchip_in_kernel(vcpu->kvm))
                return kvm_lapic_get_cr8(vcpu);
        else
                return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static inline u32 bit(int bitno)
{
        return 1 << (bitno & 31);
}

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
        MSR_K6_STAR,
#ifdef CONFIG_X86_64
        MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
        MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
        MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
        MSR_IA32_MISC_ENABLE,
};

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        if (efer & efer_reserved_bits) {
                printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
                       efer);
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (is_paging(vcpu)
            && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
                printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (efer & EFER_FFXSR) {
                struct kvm_cpuid_entry2 *feat;

                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
                        printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        }

        if (efer & EFER_SVME) {
                struct kvm_cpuid_entry2 *feat;

                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
                        printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        }

        kvm_x86_ops->set_efer(vcpu, efer);

        efer &= ~EFER_LMA;
        efer |= vcpu->arch.shadow_efer & EFER_LMA;

        vcpu->arch.shadow_efer = efer;

        vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
        kvm_mmu_reset_context(vcpu);
}

void kvm_enable_efer_bits(u64 mask)
{
        efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
        return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
        return kvm_set_msr(vcpu, index, *data);
}

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
        static int version;
        struct pvclock_wall_clock wc;
        struct timespec now, sys, boot;

        if (!wall_clock)
                return;

        version++;

        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

        /*
         * The guest calculates current wall clock time by adding
         * system time (updated by kvm_write_guest_time below) to the
         * wall clock specified here.  guest system time equals host
         * system time for us, thus we must fill in host boot time here.
         */
        now = current_kernel_time();
        ktime_get_ts(&sys);
        boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));

        wc.sec = boot.tv_sec;
        wc.nsec = boot.tv_nsec;
        wc.version = version;

        kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

        version++;
        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

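/*
 * The version field above implements a seqcount-style protocol: it is
 * bumped to an odd value before the wall clock data is rewritten and to
 * an even value afterwards, so a guest can detect a torn read.  Roughly,
 * a guest reader would do something like this (illustrative sketch, not
 * code from this file):
 *
 *	do {
 *		version = wc->version;
 *		rmb();
 *		sec  = wc->sec;
 *		nsec = wc->nsec;
 *		rmb();
 *	} while ((wc->version != version) || (version & 1));
 */
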
static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
        uint32_t quotient, remainder;

        /* Don't try to replace with do_div(), this one calculates
         * "(dividend << 32) / divisor" */
        __asm__ ( "divl %4"
                  : "=a" (quotient), "=d" (remainder)
                  : "0" (0), "1" (dividend), "r" (divisor) );
        return quotient;
}

static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
        uint64_t nsecs = 1000000000LL;
        int32_t  shift = 0;
        uint64_t tps64;
        uint32_t tps32;

        tps64 = tsc_khz * 1000LL;
        while (tps64 > nsecs*2) {
                tps64 >>= 1;
                shift--;
        }

        tps32 = (uint32_t)tps64;
        while (tps32 <= (uint32_t)nsecs) {
                tps32 <<= 1;
                shift++;
        }

        hv_clock->tsc_shift = shift;
        hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

        pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
                 __func__, tsc_khz, hv_clock->tsc_shift,
                 hv_clock->tsc_to_system_mul);
}

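/*
 * Worked example (illustrative): for a 2 GHz TSC, tsc_khz = 2000000 and
 * tps64 = 2e9 ticks/sec.  Neither loop fires (1e9 < 2e9 <= 2e9), so
 * shift stays 0 and tsc_to_system_mul = div_frac(1e9, 2e9) =
 * (10^9 * 2^32) / (2 * 10^9) = 2^31 = 0x80000000 -- a 32.32 fixed-point
 * factor of exactly 0.5, i.e. half a nanosecond per TSC tick, as
 * expected at 2 GHz.
 */
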
static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

static void kvm_write_guest_time(struct kvm_vcpu *v)
{
        struct timespec ts;
        unsigned long flags;
        struct kvm_vcpu_arch *vcpu = &v->arch;
        void *shared_kaddr;
        unsigned long this_tsc_khz;

        if ((!vcpu->time_page))
                return;

        this_tsc_khz = get_cpu_var(cpu_tsc_khz);
        if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
                kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
                vcpu->hv_clock_tsc_khz = this_tsc_khz;
        }
        put_cpu_var(cpu_tsc_khz);

        /* Keep irq disabled to prevent changes to the clock */
        local_irq_save(flags);
        kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
                    &vcpu->hv_clock.tsc_timestamp);
        ktime_get_ts(&ts);
        local_irq_restore(flags);

        /* With all the info we got, fill in the values */

        vcpu->hv_clock.system_time = ts.tv_nsec +
                                     (NSEC_PER_SEC * (u64)ts.tv_sec);
        /*
         * The interface expects us to write an even number signaling that the
         * update is finished. Since the guest won't see the intermediate
         * state, we just increase by 2 at the end.
         */
        vcpu->hv_clock.version += 2;

        shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

        memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
               sizeof(vcpu->hv_clock));

        kunmap_atomic(shared_kaddr, KM_USER0);

        mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}

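/*
 * Guest-side counterpart (sketch, for illustration; this code lives in
 * the guest's pvclock implementation, not in this file): with the scale
 * computed by kvm_set_time_scale() the guest turns a TSC delta into
 * nanoseconds roughly as
 *
 *	delta = rdtsc() - hv_clock.tsc_timestamp;
 *	if (hv_clock.tsc_shift >= 0)
 *		delta <<= hv_clock.tsc_shift;
 *	else
 *		delta >>= -hv_clock.tsc_shift;
 *	ns = hv_clock.system_time +
 *	     ((delta * hv_clock.tsc_to_system_mul) >> 32);
 */
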
static int kvm_request_guest_time_update(struct kvm_vcpu *v)
{
        struct kvm_vcpu_arch *vcpu = &v->arch;

        if (!vcpu->time_page)
                return 0;
        set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
        return 1;
}

static bool msr_mtrr_valid(unsigned msr)
{
        switch (msr) {
        case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
        case MSR_MTRRfix64K_00000:
        case MSR_MTRRfix16K_80000:
        case MSR_MTRRfix16K_A0000:
        case MSR_MTRRfix4K_C0000:
        case MSR_MTRRfix4K_C8000:
        case MSR_MTRRfix4K_D0000:
        case MSR_MTRRfix4K_D8000:
        case MSR_MTRRfix4K_E0000:
        case MSR_MTRRfix4K_E8000:
        case MSR_MTRRfix4K_F0000:
        case MSR_MTRRfix4K_F8000:
        case MSR_MTRRdefType:
        case MSR_IA32_CR_PAT:
                return true;
        case 0x2f8:
                return true;
        }
        return false;
}

static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

        if (!msr_mtrr_valid(msr))
                return 1;

        if (msr == MSR_MTRRdefType) {
                vcpu->arch.mtrr_state.def_type = data;
                vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
        } else if (msr == MSR_MTRRfix64K_00000)
                p[0] = data;
        else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
                p[1 + msr - MSR_MTRRfix16K_80000] = data;
        else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
                p[3 + msr - MSR_MTRRfix4K_C0000] = data;
        else if (msr == MSR_IA32_CR_PAT)
                vcpu->arch.pat = data;
        else {	/* Variable MTRRs */
                int idx, is_mtrr_mask;
                u64 *pt;

                idx = (msr - 0x200) / 2;
                is_mtrr_mask = msr - 0x200 - 2 * idx;
                if (!is_mtrr_mask)
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
                else
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
                *pt = data;
        }

        kvm_mmu_reset_context(vcpu);
        return 0;
}

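/*
 * Variable-MTRR index math, by example (illustration only): the variable
 * MTRRs live at 0x200 + 2*n (PHYSBASEn) and 0x200 + 2*n + 1 (PHYSMASKn).
 * For msr == 0x205: idx = (0x205 - 0x200) / 2 = 2 and is_mtrr_mask =
 * 0x205 - 0x200 - 2*2 = 1, so the write lands in var_ranges[2].mask_lo,
 * i.e. IA32_MTRR_PHYSMASK2.
 */
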
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        switch (msr) {
        case MSR_EFER:
                set_efer(vcpu, data);
                break;
        case MSR_IA32_MC0_STATUS:
                pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
                          __func__, data);
                break;
        case MSR_IA32_MCG_STATUS:
                pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
                          __func__, data);
                break;
        case MSR_IA32_MCG_CTL:
                pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
                          __func__, data);
                break;
        case MSR_IA32_DEBUGCTLMSR:
                if (!data) {
                        /* We support the non-activated case already */
                        break;
                } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
                        /* Values other than LBR and BTF are vendor-specific,
                           thus reserved and should throw a #GP */
                        return 1;
                }
                pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
                          __func__, data);
                break;
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_UCODE_WRITE:
        case MSR_VM_HSAVE_PA:
                break;
        case 0x200 ... 0x2ff:
                return set_msr_mtrr(vcpu, msr, data);
        case MSR_IA32_APICBASE:
                kvm_set_apic_base(vcpu, data);
                break;
        case MSR_IA32_MISC_ENABLE:
                vcpu->arch.ia32_misc_enable_msr = data;
                break;
        case MSR_KVM_WALL_CLOCK:
                vcpu->kvm->arch.wall_clock = data;
                kvm_write_wall_clock(vcpu->kvm, data);
                break;
        case MSR_KVM_SYSTEM_TIME: {
                if (vcpu->arch.time_page) {
                        kvm_release_page_dirty(vcpu->arch.time_page);
                        vcpu->arch.time_page = NULL;
                }

                vcpu->arch.time = data;

                /* we verify if the enable bit is set... */
                if (!(data & 1))
                        break;

                /* ...but clean it before doing the actual write */
                vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

                vcpu->arch.time_page =
                                gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);

                if (is_error_page(vcpu->arch.time_page)) {
                        kvm_release_page_clean(vcpu->arch.time_page);
                        vcpu->arch.time_page = NULL;
                }

                kvm_request_guest_time_update(vcpu);
                break;
        }
        default:
                pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
        return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

        if (!msr_mtrr_valid(msr))
                return 1;

        if (msr == MSR_MTRRdefType)
                *pdata = vcpu->arch.mtrr_state.def_type +
                         (vcpu->arch.mtrr_state.enabled << 10);
        else if (msr == MSR_MTRRfix64K_00000)
                *pdata = p[0];
        else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
                *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
        else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
                *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
        else if (msr == MSR_IA32_CR_PAT)
                *pdata = vcpu->arch.pat;
        else {	/* Variable MTRRs */
                int idx, is_mtrr_mask;
                u64 *pt;

                idx = (msr - 0x200) / 2;
                is_mtrr_mask = msr - 0x200 - 2 * idx;
                if (!is_mtrr_mask)
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
                else
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
                *pdata = *pt;
        }

        return 0;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data;

        switch (msr) {
        case 0xc0010010: /* SYSCFG */
        case 0xc0010015: /* HWCR */
        case MSR_IA32_PLATFORM_ID:
        case MSR_IA32_P5_MC_ADDR:
        case MSR_IA32_P5_MC_TYPE:
        case MSR_IA32_MC0_CTL:
        case MSR_IA32_MCG_STATUS:
        case MSR_IA32_MCG_CAP:
        case MSR_IA32_MCG_CTL:
        case MSR_IA32_MC0_MISC:
        case MSR_IA32_MC0_MISC+4:
        case MSR_IA32_MC0_MISC+8:
        case MSR_IA32_MC0_MISC+12:
        case MSR_IA32_MC0_MISC+16:
        case MSR_IA32_MC0_MISC+20:
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_EBL_CR_POWERON:
        case MSR_IA32_DEBUGCTLMSR:
        case MSR_IA32_LASTBRANCHFROMIP:
        case MSR_IA32_LASTBRANCHTOIP:
        case MSR_IA32_LASTINTFROMIP:
        case MSR_IA32_LASTINTTOIP:
        case MSR_VM_HSAVE_PA:
        case MSR_P6_EVNTSEL0:
        case MSR_P6_EVNTSEL1:
                data = 0;
                break;
        case MSR_MTRRcap:
                data = 0x500 | KVM_NR_VAR_MTRR;
                break;
        case 0x200 ... 0x2ff:
                return get_msr_mtrr(vcpu, msr, pdata);
        case 0xcd: /* fsb frequency */
                data = 3;
                break;
        case MSR_IA32_APICBASE:
                data = kvm_get_apic_base(vcpu);
                break;
        case MSR_IA32_MISC_ENABLE:
                data = vcpu->arch.ia32_misc_enable_msr;
                break;
        case MSR_IA32_PERF_STATUS:
                /* TSC increment by tick */
                data = 1000ULL;
                /* CPU multiplier */
                data |= (((uint64_t)4ULL) << 40);
                break;
        case MSR_EFER:
                data = vcpu->arch.shadow_efer;
                break;
        case MSR_KVM_WALL_CLOCK:
                data = vcpu->kvm->arch.wall_clock;
                break;
        case MSR_KVM_SYSTEM_TIME:
                data = vcpu->arch.time;
                break;
        default:
                pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }
        *pdata = data;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
                    struct kvm_msr_entry *entries,
                    int (*do_msr)(struct kvm_vcpu *vcpu,
                                  unsigned index, u64 *data))
{
        int i;

        vcpu_load(vcpu);

        down_read(&vcpu->kvm->slots_lock);
        for (i = 0; i < msrs->nmsrs; ++i)
                if (do_msr(vcpu, entries[i].index, &entries[i].data))
                        break;
        up_read(&vcpu->kvm->slots_lock);

        vcpu_put(vcpu);

        return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
                  int (*do_msr)(struct kvm_vcpu *vcpu,
                                unsigned index, u64 *data),
                  int writeback)
{
        struct kvm_msrs msrs;
        struct kvm_msr_entry *entries;
        int r, n;
        unsigned size;

        r = -EFAULT;
        if (copy_from_user(&msrs, user_msrs, sizeof msrs))
                goto out;

        r = -E2BIG;
        if (msrs.nmsrs >= MAX_IO_MSRS)
                goto out;

        r = -ENOMEM;
        size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
        entries = vmalloc(size);
        if (!entries)
                goto out;

        r = -EFAULT;
        if (copy_from_user(entries, user_msrs->entries, size))
                goto out_free;

        r = n = __msr_io(vcpu, &msrs, entries, do_msr);
        if (r < 0)
                goto out_free;

        r = -EFAULT;
        if (writeback && copy_to_user(user_msrs->entries, entries, size))
                goto out_free;

        r = n;

out_free:
        vfree(entries);
out:
        return r;
}

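/*
 * Userspace view of the ioctls above (sketch, assuming a vcpu fd obtained
 * from KVM_CREATE_VCPU; not code from this file): KVM_GET_MSRS takes a
 * struct kvm_msrs header with nmsrs followed by an entries[] array of the
 * indices to read, and returns the number of entries processed:
 *
 *	struct {
 *		struct kvm_msrs hdr;
 *		struct kvm_msr_entry e[1];
 *	} m = { .hdr = { .nmsrs = 1 },
 *		.e = { { .index = MSR_IA32_APICBASE } } };
 *	int n = ioctl(vcpu_fd, KVM_GET_MSRS, &m);
 *	// on success n == 1 and m.e[0].data holds the MSR value
 */
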
int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_IRQCHIP:
        case KVM_CAP_HLT:
        case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
        case KVM_CAP_SET_TSS_ADDR:
        case KVM_CAP_EXT_CPUID:
        case KVM_CAP_CLOCKSOURCE:
        case KVM_CAP_PIT:
        case KVM_CAP_NOP_IO_DELAY:
        case KVM_CAP_MP_STATE:
        case KVM_CAP_SYNC_MMU:
        case KVM_CAP_REINJECT_CONTROL:
        case KVM_CAP_IRQ_INJECT_STATUS:
        case KVM_CAP_ASSIGN_DEV_IRQ:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
        case KVM_CAP_VAPIC:
                r = !kvm_x86_ops->cpu_has_accelerated_tpr();
                break;
        case KVM_CAP_NR_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_MEMORY_SLOTS;
                break;
        case KVM_CAP_PV_MMU:
                r = !tdp_enabled;
                break;
        case KVM_CAP_IOMMU:
                r = iommu_found();
                break;
        default:
                r = 0;
                break;
        }
        return r;
}

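/*
 * Userspace probes these capabilities with KVM_CHECK_EXTENSION on
 * /dev/kvm (sketch, illustration only):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP) > 0)
 *		;	// in-kernel irqchip is available
 *
 * A return of 0 means "not supported"; positive values either mean
 * "supported" or carry extra data, as with KVM_CAP_COALESCED_MMIO
 * returning the page offset above.
 */
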
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_GET_MSR_INDEX_LIST: {
                struct kvm_msr_list __user *user_msr_list = argp;
                struct kvm_msr_list msr_list;
                unsigned n;

                r = -EFAULT;
                if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
                        goto out;
                n = msr_list.nmsrs;
                msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
                if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
                        goto out;
                r = -E2BIG;
                if (n < num_msrs_to_save)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(user_msr_list->indices, &msrs_to_save,
                                 num_msrs_to_save * sizeof(u32)))
                        goto out;
                if (copy_to_user(user_msr_list->indices
                                 + num_msrs_to_save * sizeof(u32),
                                 &emulated_msrs,
                                 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
                        goto out;
                r = 0;
                break;
        }
        case KVM_GET_SUPPORTED_CPUID: {
                struct kvm_cpuid2 __user *cpuid_arg = argp;
                struct kvm_cpuid2 cpuid;

                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
                        goto out;
                r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
                                                      cpuid_arg->entries);
                if (r)
                        goto out;

                r = -EFAULT;
                if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
                        goto out;
                r = 0;
                break;
        }
        default:
                r = -EINVAL;
        }
out:
        return r;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        kvm_x86_ops->vcpu_load(vcpu, cpu);
        kvm_request_guest_time_update(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops->vcpu_put(vcpu);
        kvm_put_guest_fpu(vcpu);
}

static int is_efer_nx(void)
{
        unsigned long long efer = 0;

        rdmsrl_safe(MSR_EFER, &efer);
        return efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_cpuid_entry2 *e, *entry;

        entry = NULL;
        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                e = &vcpu->arch.cpuid_entries[i];
                if (e->function == 0x80000001) {
                        entry = e;
                        break;
                }
        }
        if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
                entry->edx &= ~(1 << 20);
                printk(KERN_INFO "kvm: guest NX capability removed\n");
        }
}

/* when an old userspace process fills a new kernel module */
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                                    struct kvm_cpuid *cpuid,
                                    struct kvm_cpuid_entry __user *entries)
{
        int r, i;
        struct kvm_cpuid_entry *cpuid_entries;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -ENOMEM;
        cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
        if (!cpuid_entries)
                goto out;
        r = -EFAULT;
        if (copy_from_user(cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry)))
                goto out_free;
        for (i = 0; i < cpuid->nent; i++) {
                vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
                vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
                vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
                vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
                vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
                vcpu->arch.cpuid_entries[i].index = 0;
                vcpu->arch.cpuid_entries[i].flags = 0;
                vcpu->arch.cpuid_entries[i].padding[0] = 0;
                vcpu->arch.cpuid_entries[i].padding[1] = 0;
                vcpu->arch.cpuid_entries[i].padding[2] = 0;
        }
        vcpu->arch.cpuid_nent = cpuid->nent;
        cpuid_fix_nx_cap(vcpu);
        r = 0;

out_free:
        vfree(cpuid_entries);
out:
        return r;
}

static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                                     struct kvm_cpuid2 *cpuid,
                                     struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -EFAULT;
        if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        vcpu->arch.cpuid_nent = cpuid->nent;
        return 0;

out:
        return r;
}

static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                                     struct kvm_cpuid2 *cpuid,
                                     struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent < vcpu->arch.cpuid_nent)
                goto out;
        r = -EFAULT;
        if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
                         vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        return 0;

out:
        cpuid->nent = vcpu->arch.cpuid_nent;
        return r;
}

static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                           u32 index)
{
        entry->function = function;
        entry->index = index;
        cpuid_count(entry->function, entry->index,
                    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
        entry->flags = 0;
}

#define F(x) bit(X86_FEATURE_##x)

static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                         u32 index, int *nent, int maxnent)
{
        unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
        unsigned f_lm = F(LM);
#else
        unsigned f_lm = 0;
#endif

        /* cpuid 1.edx */
        const u32 kvm_supported_word0_x86_features =
                F(FPU) | F(VME) | F(DE) | F(PSE) |
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
                0 /* Reserved, DS, ACPI */ | F(MMX) |
                F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
                0 /* HTT, TM, Reserved, PBE */;
        /* cpuid 0x80000001.edx */
        const u32 kvm_supported_word1_x86_features =
                F(FPU) | F(VME) | F(DE) | F(PSE) |
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* Reserved */ |
                f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
                F(FXSR) | F(FXSR_OPT) | 0 /* GBPAGES */ | 0 /* RDTSCP */ |
                0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
        /* cpuid 1.ecx */
        const u32 kvm_supported_word4_x86_features =
                F(XMM3) | 0 /* Reserved, DTES64, MONITOR */ |
                0 /* DS-CPL, VMX, SMX, EST */ |
                0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
                0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
                0 /* Reserved, DCA */ | F(XMM4_1) |
                F(XMM4_2) | 0 /* x2APIC */ | F(MOVBE) | F(POPCNT) |
                0 /* Reserved, XSAVE, OSXSAVE */;
        /* cpuid 0x80000001.ecx */
        const u32 kvm_supported_word6_x86_features =
                F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
                F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
                F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
                0 /* SKINIT */ | 0 /* WDT */;

        /* all calls to cpuid_count() should be made on the same cpu */
        get_cpu();
        do_cpuid_1_ent(entry, function, index);
        ++*nent;

        switch (function) {
        case 0:
                entry->eax = min(entry->eax, (u32)0xb);
                break;
        case 1:
                entry->edx &= kvm_supported_word0_x86_features;
                entry->ecx &= kvm_supported_word4_x86_features;
                break;
        /* function 2 entries are STATEFUL. That is, repeated cpuid commands
         * may return different values. This forces us to get_cpu() before
         * issuing the first command, and also to emulate this annoying behavior
         * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
        case 2: {
                int t, times = entry->eax & 0xff;

                entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
                entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
                for (t = 1; t < times && *nent < maxnent; ++t) {
                        do_cpuid_1_ent(&entry[t], function, 0);
                        entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
                        ++*nent;
                }
                break;
        }
        /* function 4 and 0xb have additional index. */
        case 4: {
                int i, cache_type;

                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* read more entries until cache_type is zero */
                for (i = 1; *nent < maxnent; ++i) {
                        cache_type = entry[i - 1].eax & 0x1f;
                        if (!cache_type)
                                break;
                        do_cpuid_1_ent(&entry[i], function, i);
                        entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                }
                break;
        }
        case 0xb: {
                int i, level_type;

                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* read more entries until level_type is zero */
                for (i = 1; *nent < maxnent; ++i) {
                        level_type = entry[i - 1].ecx & 0xff00;
                        if (!level_type)
                                break;
                        do_cpuid_1_ent(&entry[i], function, i);
                        entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                }
                break;
        }
        case 0x80000000:
                entry->eax = min(entry->eax, 0x8000001a);
                break;
        case 0x80000001:
                entry->edx &= kvm_supported_word1_x86_features;
                entry->ecx &= kvm_supported_word6_x86_features;
                break;
        }
        put_cpu();
}

#undef F

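/*
 * The F() masks above work bit-for-bit on the raw CPUID output: e.g.
 * F(NX) expands to bit(X86_FEATURE_NX), i.e. 1 << (X86_FEATURE_NX & 31),
 * the NX bit position within the 0x80000001.edx word.  ANDing a leaf's
 * register with the corresponding kvm_supported_word* constant therefore
 * strips every feature KVM cannot (or will not) virtualize before the
 * leaf is reported to userspace.
 */
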
static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
                                             struct kvm_cpuid_entry2 __user *entries)
{
        struct kvm_cpuid_entry2 *cpuid_entries;
        int limit, nent = 0, r = -E2BIG;
        u32 func;

        if (cpuid->nent < 1)
                goto out;
        r = -ENOMEM;
        cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
        if (!cpuid_entries)
                goto out;

        do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
        limit = cpuid_entries[0].eax;
        for (func = 1; func <= limit && nent < cpuid->nent; ++func)
                do_cpuid_ent(&cpuid_entries[nent], func, 0,
                             &nent, cpuid->nent);
        r = -E2BIG;
        if (nent >= cpuid->nent)
                goto out_free;

        do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
        limit = cpuid_entries[nent - 1].eax;
        for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
                do_cpuid_ent(&cpuid_entries[nent], func, 0,
                             &nent, cpuid->nent);
        r = -EFAULT;
        if (copy_to_user(entries, cpuid_entries,
                         nent * sizeof(struct kvm_cpuid_entry2)))
                goto out_free;
        cpuid->nent = nent;
        r = 0;

out_free:
        vfree(cpuid_entries);
out:
        return r;
}

static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
                                    struct kvm_lapic_state *s)
{
        vcpu_load(vcpu);
        memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
        vcpu_put(vcpu);

        return 0;
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
                                    struct kvm_lapic_state *s)
{
        vcpu_load(vcpu);
        memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
        kvm_apic_post_state_restore(vcpu);
        vcpu_put(vcpu);

        return 0;
}

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
                                    struct kvm_interrupt *irq)
{
        if (irq->irq < 0 || irq->irq >= 256)
                return -EINVAL;
        if (irqchip_in_kernel(vcpu->kvm))
                return -ENXIO;
        vcpu_load(vcpu);

        set_bit(irq->irq, vcpu->arch.irq_pending);
        set_bit(irq->irq / BITS_PER_LONG, &vcpu->arch.irq_summary);

        vcpu_put(vcpu);

        return 0;
}

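/*
 * KVM_INTERRUPT, the ioctl backed by the function above, is only valid
 * when the irqchip is emulated in userspace; with an in-kernel irqchip
 * it fails with -ENXIO and interrupts go through KVM_IRQ_LINE instead.
 * Sketch of a caller (illustration only):
 *
 *	struct kvm_interrupt irq = { .irq = 32 };
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);	// queue vector 32
 */
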
static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        kvm_inject_nmi(vcpu);
        vcpu_put(vcpu);

        return 0;
}

static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
                                           struct kvm_tpr_access_ctl *tac)
{
        if (tac->flags)
                return -EINVAL;
        vcpu->arch.tpr_access_reporting = !!tac->enabled;
        return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;
        struct kvm_lapic_state *lapic = NULL;

        switch (ioctl) {
        case KVM_GET_LAPIC: {
                lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);

                r = -ENOMEM;
                if (!lapic)
                        goto out;
                r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_LAPIC: {
                lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
                r = -ENOMEM;
                if (!lapic)
                        goto out;
                r = -EFAULT;
                if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
                        goto out;
                r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;

                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof irq))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_NMI: {
                r = kvm_vcpu_ioctl_nmi(vcpu);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_CPUID: {
                struct kvm_cpuid __user *cpuid_arg = argp;
                struct kvm_cpuid cpuid;

                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
                        goto out;
                r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
                if (r)
                        goto out;
                break;
        }
        case KVM_SET_CPUID2: {
                struct kvm_cpuid2 __user *cpuid_arg = argp;
                struct kvm_cpuid2 cpuid;

                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
                        goto out;
                r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
                                              cpuid_arg->entries);
                if (r)
                        goto out;
                break;
        }
        case KVM_GET_CPUID2: {
                struct kvm_cpuid2 __user *cpuid_arg = argp;
                struct kvm_cpuid2 cpuid;

                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
                        goto out;
                r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
                                              cpuid_arg->entries);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
                        goto out;
                r = 0;
                break;
        }
        case KVM_GET_MSRS:
                r = msr_io(vcpu, argp, kvm_get_msr, 1);
                break;
        case KVM_SET_MSRS:
                r = msr_io(vcpu, argp, do_set_msr, 0);
                break;
        case KVM_TPR_ACCESS_REPORTING: {
                struct kvm_tpr_access_ctl tac;

                r = -EFAULT;
                if (copy_from_user(&tac, argp, sizeof tac))
                        goto out;
                r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &tac, sizeof tac))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_VAPIC_ADDR: {
                struct kvm_vapic_addr va;

                r = -EINVAL;
                if (!irqchip_in_kernel(vcpu->kvm))
                        goto out;
                r = -EFAULT;
                if (copy_from_user(&va, argp, sizeof va))
                        goto out;
                r = 0;
                kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
                break;
        }
        default:
                r = -EINVAL;
        }
out:
        kfree(lapic);
        return r;
}

static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
        int ret;

        if (addr > (unsigned int)(-3 * PAGE_SIZE))
                return -1;
        ret = kvm_x86_ops->set_tss_addr(kvm, addr);
        return ret;
}

static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
                                         u32 kvm_nr_mmu_pages)
{
        if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
                return -EINVAL;

        down_write(&kvm->slots_lock);
        spin_lock(&kvm->mmu_lock);

        kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
        kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;

        spin_unlock(&kvm->mmu_lock);
        up_write(&kvm->slots_lock);
        return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
        return kvm->arch.n_alloc_mmu_pages;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        int i;
        struct kvm_mem_alias *alias;

        for (i = 0; i < kvm->arch.naliases; ++i) {
                alias = &kvm->arch.aliases[i];
                if (gfn >= alias->base_gfn
                    && gfn < alias->base_gfn + alias->npages)
                        return alias->target_gfn + gfn - alias->base_gfn;
        }
        return gfn;
}

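/*
 * Alias translation, by example (illustration only): with base_gfn =
 * 0xa0 (the VGA window at guest physical 0xa0000), npages = 0x20 and
 * target_gfn = 0x1000, an access to gfn 0xa5 falls inside [0xa0, 0xc0)
 * and is redirected to 0x1000 + (0xa5 - 0xa0) = 0x1005; anything outside
 * the window is returned unchanged.
 */
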
/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
                                         struct kvm_memory_alias *alias)
{
        int r, n;
        struct kvm_mem_alias *p;

        r = -EINVAL;
        /* General sanity checks */
        if (alias->memory_size & (PAGE_SIZE - 1))
                goto out;
        if (alias->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
        if (alias->slot >= KVM_ALIAS_SLOTS)
                goto out;
        if (alias->guest_phys_addr + alias->memory_size
            < alias->guest_phys_addr)
                goto out;
        if (alias->target_phys_addr + alias->memory_size
            < alias->target_phys_addr)
                goto out;

        down_write(&kvm->slots_lock);
        spin_lock(&kvm->mmu_lock);

        p = &kvm->arch.aliases[alias->slot];
        p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
        p->npages = alias->memory_size >> PAGE_SHIFT;
        p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

        for (n = KVM_ALIAS_SLOTS; n > 0; --n)
                if (kvm->arch.aliases[n - 1].npages)
                        break;
        kvm->arch.naliases = n;

        spin_unlock(&kvm->mmu_lock);
        kvm_mmu_zap_all(kvm);

        up_write(&kvm->slots_lock);

        return 0;

out:
        return r;
}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
        int r;

        r = 0;
        switch (chip->chip_id) {
        case KVM_IRQCHIP_PIC_MASTER:
                memcpy(&chip->chip.pic,
                       &pic_irqchip(kvm)->pics[0],
                       sizeof(struct kvm_pic_state));
                break;
        case KVM_IRQCHIP_PIC_SLAVE:
                memcpy(&chip->chip.pic,
                       &pic_irqchip(kvm)->pics[1],
                       sizeof(struct kvm_pic_state));
                break;
        case KVM_IRQCHIP_IOAPIC:
                memcpy(&chip->chip.ioapic,
                       ioapic_irqchip(kvm),
                       sizeof(struct kvm_ioapic_state));
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
        int r;

        r = 0;
        switch (chip->chip_id) {
        case KVM_IRQCHIP_PIC_MASTER:
                memcpy(&pic_irqchip(kvm)->pics[0],
                       &chip->chip.pic,
                       sizeof(struct kvm_pic_state));
                break;
        case KVM_IRQCHIP_PIC_SLAVE:
                memcpy(&pic_irqchip(kvm)->pics[1],
                       &chip->chip.pic,
                       sizeof(struct kvm_pic_state));
                break;
        case KVM_IRQCHIP_IOAPIC:
                memcpy(ioapic_irqchip(kvm),
                       &chip->chip.ioapic,
                       sizeof(struct kvm_ioapic_state));
                break;
        default:
                r = -EINVAL;
                break;
        }
        kvm_pic_update_irq(pic_irqchip(kvm));
        return r;
}

static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
        int r = 0;

        memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
        return r;
}

static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
        int r = 0;

        memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
        kvm_pit_load_count(kvm, 0, ps->channels[0].count);
        return r;
}

static int kvm_vm_ioctl_reinject(struct kvm *kvm,
                                 struct kvm_reinject_control *control)
{
        if (!kvm->arch.vpit)
                return -ENXIO;
        kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
        return 0;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        int r;
        int n;
        struct kvm_memory_slot *memslot;
        int is_dirty = 0;

        down_write(&kvm->slots_lock);

        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* If nothing is dirty, don't bother messing with page tables. */
        if (is_dirty) {
                spin_lock(&kvm->mmu_lock);
                kvm_mmu_slot_remove_write_access(kvm, log->slot);
                spin_unlock(&kvm->mmu_lock);
                kvm_flush_remote_tlbs(kvm);
                memslot = &kvm->memslots[log->slot];
                n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
                memset(memslot->dirty_bitmap, 0, n);
        }
        r = 0;
out:
        up_write(&kvm->slots_lock);
        return r;
}

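/*
 * Userspace consumes this through KVM_GET_DIRTY_LOG (sketch, assuming a
 * bitmap sized for the slot exactly as computed above; illustration
 * only):
 *
 *	unsigned long bitmap[ALIGN(npages, BITS_PER_LONG) / BITS_PER_LONG];
 *	struct kvm_dirty_log log = { .slot = slot,
 *				     .dirty_bitmap = bitmap };
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *	// bit i set => page i of the slot was written since the last call
 */
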
1fe779f8
CO
1823long kvm_arch_vm_ioctl(struct file *filp,
1824 unsigned int ioctl, unsigned long arg)
1825{
1826 struct kvm *kvm = filp->private_data;
1827 void __user *argp = (void __user *)arg;
1828 int r = -EINVAL;
f0d66275
DH
1829 /*
1830 * This union makes it completely explicit to gcc-3.x
1831 * that these two variables' stack usage should be
1832 * combined, not added together.
1833 */
1834 union {
1835 struct kvm_pit_state ps;
1836 struct kvm_memory_alias alias;
1837 } u;
1fe779f8
CO
1838
1839 switch (ioctl) {
1840 case KVM_SET_TSS_ADDR:
1841 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
1842 if (r < 0)
1843 goto out;
1844 break;
1845 case KVM_SET_MEMORY_REGION: {
1846 struct kvm_memory_region kvm_mem;
1847 struct kvm_userspace_memory_region kvm_userspace_mem;
1848
1849 r = -EFAULT;
1850 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
1851 goto out;
1852 kvm_userspace_mem.slot = kvm_mem.slot;
1853 kvm_userspace_mem.flags = kvm_mem.flags;
1854 kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
1855 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
1856 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
1857 if (r)
1858 goto out;
1859 break;
1860 }
1861 case KVM_SET_NR_MMU_PAGES:
1862 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
1863 if (r)
1864 goto out;
1865 break;
1866 case KVM_GET_NR_MMU_PAGES:
1867 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
1868 break;
f0d66275 1869 case KVM_SET_MEMORY_ALIAS:
1fe779f8 1870 r = -EFAULT;
f0d66275 1871 if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
1fe779f8 1872 goto out;
f0d66275 1873 r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
1fe779f8
CO
1874 if (r)
1875 goto out;
1876 break;
1fe779f8
CO
1877 case KVM_CREATE_IRQCHIP:
1878 r = -ENOMEM;
d7deeeb0
ZX
1879 kvm->arch.vpic = kvm_create_pic(kvm);
1880 if (kvm->arch.vpic) {
1fe779f8
CO
1881 r = kvm_ioapic_init(kvm);
1882 if (r) {
1883 kfree(kvm->arch.vpic);
1884 kvm->arch.vpic = NULL;
1885 goto out;
1886 }
1887 } else
1888 goto out;
1889 r = kvm_setup_default_irq_routing(kvm);
1890 if (r) {
1891 kfree(kvm->arch.vpic);
1892 kfree(kvm->arch.vioapic);
1893 goto out;
1894 }
1fe779f8 1895 break;
7837699f 1896 case KVM_CREATE_PIT:
1897 mutex_lock(&kvm->lock);
1898 r = -EEXIST;
1899 if (kvm->arch.vpit)
1900 goto create_pit_unlock;
1901 r = -ENOMEM;
1902 kvm->arch.vpit = kvm_create_pit(kvm);
1903 if (kvm->arch.vpit)
1904 r = 0;
1905 create_pit_unlock:
1906 mutex_unlock(&kvm->lock);
7837699f 1907 break;
4925663a 1908 case KVM_IRQ_LINE_STATUS:
1909 case KVM_IRQ_LINE: {
1910 struct kvm_irq_level irq_event;
1911
1912 r = -EFAULT;
1913 if (copy_from_user(&irq_event, argp, sizeof irq_event))
1914 goto out;
1915 if (irqchip_in_kernel(kvm)) {
4925663a 1916 __s32 status;
1fe779f8 1917 mutex_lock(&kvm->lock);
1918 status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
1919 irq_event.irq, irq_event.level);
1fe779f8 1920 mutex_unlock(&kvm->lock);
1921 if (ioctl == KVM_IRQ_LINE_STATUS) {
1922 irq_event.status = status;
1923 if (copy_to_user(argp, &irq_event,
1924 sizeof irq_event))
1925 goto out;
1926 }
1927 r = 0;
1928 }
1929 break;
1930 }
1931 case KVM_GET_IRQCHIP: {
1932 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
f0d66275 1933 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
1fe779f8 1934
1935 r = -ENOMEM;
1936 if (!chip)
1fe779f8 1937 goto out;
1938 r = -EFAULT;
1939 if (copy_from_user(chip, argp, sizeof *chip))
1940 goto get_irqchip_out;
1941 r = -ENXIO;
1942 if (!irqchip_in_kernel(kvm))
1943 goto get_irqchip_out;
1944 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
1fe779f8 1945 if (r)
f0d66275 1946 goto get_irqchip_out;
1fe779f8 1947 r = -EFAULT;
1948 if (copy_to_user(argp, chip, sizeof *chip))
1949 goto get_irqchip_out;
1fe779f8 1950 r = 0;
1951 get_irqchip_out:
1952 kfree(chip);
1953 if (r)
1954 goto out;
1955 break;
1956 }
1957 case KVM_SET_IRQCHIP: {
1958 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
f0d66275 1959 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
1fe779f8 1960
1961 r = -ENOMEM;
1962 if (!chip)
1fe779f8 1963 goto out;
1964 r = -EFAULT;
1965 if (copy_from_user(chip, argp, sizeof *chip))
1966 goto set_irqchip_out;
1967 r = -ENXIO;
1968 if (!irqchip_in_kernel(kvm))
1969 goto set_irqchip_out;
1970 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
1fe779f8 1971 if (r)
f0d66275 1972 goto set_irqchip_out;
1fe779f8 1973 r = 0;
1974 set_irqchip_out:
1975 kfree(chip);
1976 if (r)
1977 goto out;
1978 break;
1979 }
e0f63cb9 1980 case KVM_GET_PIT: {
e0f63cb9 1981 r = -EFAULT;
f0d66275 1982 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
1983 goto out;
1984 r = -ENXIO;
1985 if (!kvm->arch.vpit)
1986 goto out;
f0d66275 1987 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
1988 if (r)
1989 goto out;
1990 r = -EFAULT;
f0d66275 1991 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
1992 goto out;
1993 r = 0;
1994 break;
1995 }
1996 case KVM_SET_PIT: {
e0f63cb9 1997 r = -EFAULT;
f0d66275 1998 if (copy_from_user(&u.ps, argp, sizeof u.ps))
1999 goto out;
2000 r = -ENXIO;
2001 if (!kvm->arch.vpit)
2002 goto out;
f0d66275 2003 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
2004 if (r)
2005 goto out;
2006 r = 0;
2007 break;
2008 }
2009 case KVM_REINJECT_CONTROL: {
2010 struct kvm_reinject_control control;
2011 r = -EFAULT;
2012 if (copy_from_user(&control, argp, sizeof(control)))
2013 goto out;
2014 r = kvm_vm_ioctl_reinject(kvm, &control);
2015 if (r)
2016 goto out;
2017 r = 0;
2018 break;
2019 }
2020 default:
2021 ;
2022 }
2023out:
2024 return r;
2025}
2026
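/*
 * Probe every MSR in msrs_to_save with rdmsr_safe() and compact the
 * list in place, so only MSRs that actually exist on this host are
 * exposed for save/restore.
 */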
a16b043c 2027static void kvm_init_msr_list(void)
2028{
2029 u32 dummy[2];
2030 unsigned i, j;
2031
2032 for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
2033 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
2034 continue;
2035 if (j < i)
2036 msrs_to_save[j] = msrs_to_save[i];
2037 j++;
2038 }
2039 num_msrs_to_save = j;
2040}
2041
2042/*
 2043 * Only the APIC needs an MMIO device hook, so take a shortcut for now.
2044 */
2045static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
2046 gpa_t addr, int len,
2047 int is_write)
2048{
2049 struct kvm_io_device *dev;
2050
2051 if (vcpu->arch.apic) {
2052 dev = &vcpu->arch.apic->dev;
92760499 2053 if (dev->in_range(dev, addr, len, is_write))
2054 return dev;
2055 }
2056 return NULL;
2057}
2058
2059
2060static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
2061 gpa_t addr, int len,
2062 int is_write)
2063{
2064 struct kvm_io_device *dev;
2065
92760499 2066 dev = vcpu_find_pervcpu_dev(vcpu, addr, len, is_write);
bbd9b64e 2067 if (dev == NULL)
2068 dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len,
2069 is_write);
2070 return dev;
2071}
2072
2073static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
2074 struct kvm_vcpu *vcpu)
2075{
2076 void *data = val;
10589a46 2077 int r = X86EMUL_CONTINUE;
2078
2079 while (bytes) {
ad312c7c 2080 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
bbd9b64e 2081 unsigned offset = addr & (PAGE_SIZE-1);
77c2002e 2082 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
2083 int ret;
2084
2085 if (gpa == UNMAPPED_GVA) {
2086 r = X86EMUL_PROPAGATE_FAULT;
2087 goto out;
2088 }
77c2002e 2089 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
2090 if (ret < 0) {
2091 r = X86EMUL_UNHANDLEABLE;
2092 goto out;
2093 }
bbd9b64e 2094
2095 bytes -= toread;
2096 data += toread;
2097 addr += toread;
bbd9b64e 2098 }
10589a46 2099out:
10589a46 2100 return r;
bbd9b64e 2101}
77c2002e 2102
2103static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
2104 struct kvm_vcpu *vcpu)
2105{
2106 void *data = val;
2107 int r = X86EMUL_CONTINUE;
2108
2109 while (bytes) {
2110 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2111 unsigned offset = addr & (PAGE_SIZE-1);
2112 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
2113 int ret;
2114
2115 if (gpa == UNMAPPED_GVA) {
2116 r = X86EMUL_PROPAGATE_FAULT;
2117 goto out;
2118 }
2119 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
2120 if (ret < 0) {
2121 r = X86EMUL_UNHANDLEABLE;
2122 goto out;
2123 }
2124
2125 bytes -= towrite;
2126 data += towrite;
2127 addr += towrite;
2128 }
2129out:
2130 return r;
2131}
2132
bbd9b64e 2133
2134static int emulator_read_emulated(unsigned long addr,
2135 void *val,
2136 unsigned int bytes,
2137 struct kvm_vcpu *vcpu)
2138{
2139 struct kvm_io_device *mmio_dev;
2140 gpa_t gpa;
2141
2142 if (vcpu->mmio_read_completed) {
2143 memcpy(val, vcpu->mmio_data, bytes);
2144 vcpu->mmio_read_completed = 0;
2145 return X86EMUL_CONTINUE;
2146 }
2147
ad312c7c 2148 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2149
2150 /* For APIC access vmexit */
2151 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2152 goto mmio;
2153
2154 if (kvm_read_guest_virt(addr, val, bytes, vcpu)
2155 == X86EMUL_CONTINUE)
2156 return X86EMUL_CONTINUE;
2157 if (gpa == UNMAPPED_GVA)
2158 return X86EMUL_PROPAGATE_FAULT;
2159
2160mmio:
2161 /*
2162 * Is this MMIO handled locally?
2163 */
10589a46 2164 mutex_lock(&vcpu->kvm->lock);
92760499 2165 mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 0);
2166 if (mmio_dev) {
2167 kvm_iodevice_read(mmio_dev, gpa, bytes, val);
10589a46 2168 mutex_unlock(&vcpu->kvm->lock);
2169 return X86EMUL_CONTINUE;
2170 }
10589a46 2171 mutex_unlock(&vcpu->kvm->lock);
2172
2173 vcpu->mmio_needed = 1;
2174 vcpu->mmio_phys_addr = gpa;
2175 vcpu->mmio_size = bytes;
2176 vcpu->mmio_is_write = 0;
2177
2178 return X86EMUL_UNHANDLEABLE;
2179}
2180
3200f405 2181int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
9f811285 2182 const void *val, int bytes)
2183{
2184 int ret;
2185
2186 ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
9f811285 2187 if (ret < 0)
bbd9b64e 2188 return 0;
ad218f85 2189 kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
2190 return 1;
2191}
2192
2193static int emulator_write_emulated_onepage(unsigned long addr,
2194 const void *val,
2195 unsigned int bytes,
2196 struct kvm_vcpu *vcpu)
2197{
2198 struct kvm_io_device *mmio_dev;
2199 gpa_t gpa;
2200
10589a46 2201 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2202
2203 if (gpa == UNMAPPED_GVA) {
c3c91fee 2204 kvm_inject_page_fault(vcpu, addr, 2);
2205 return X86EMUL_PROPAGATE_FAULT;
2206 }
2207
2208 /* For APIC access vmexit */
2209 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2210 goto mmio;
2211
2212 if (emulator_write_phys(vcpu, gpa, val, bytes))
2213 return X86EMUL_CONTINUE;
2214
2215mmio:
2216 /*
2217 * Is this MMIO handled locally?
2218 */
10589a46 2219 mutex_lock(&vcpu->kvm->lock);
92760499 2220 mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 1);
2221 if (mmio_dev) {
2222 kvm_iodevice_write(mmio_dev, gpa, bytes, val);
10589a46 2223 mutex_unlock(&vcpu->kvm->lock);
2224 return X86EMUL_CONTINUE;
2225 }
10589a46 2226 mutex_unlock(&vcpu->kvm->lock);
2227
2228 vcpu->mmio_needed = 1;
2229 vcpu->mmio_phys_addr = gpa;
2230 vcpu->mmio_size = bytes;
2231 vcpu->mmio_is_write = 1;
2232 memcpy(vcpu->mmio_data, val, bytes);
2233
2234 return X86EMUL_CONTINUE;
2235}
2236
2237int emulator_write_emulated(unsigned long addr,
2238 const void *val,
2239 unsigned int bytes,
2240 struct kvm_vcpu *vcpu)
2241{
2242 /* Crossing a page boundary? */
2243 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
2244 int rc, now;
2245
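	/*
	 * (-addr & ~PAGE_MASK) is the number of bytes from addr up to
	 * the next page boundary, e.g. addr == 0x1ffe with a 4K page
	 * gives now == 2.
	 */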
2246 now = -addr & ~PAGE_MASK;
2247 rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
2248 if (rc != X86EMUL_CONTINUE)
2249 return rc;
2250 addr += now;
2251 val += now;
2252 bytes -= now;
2253 }
2254 return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
2255}
2256EXPORT_SYMBOL_GPL(emulator_write_emulated);
2257
2258static int emulator_cmpxchg_emulated(unsigned long addr,
2259 const void *old,
2260 const void *new,
2261 unsigned int bytes,
2262 struct kvm_vcpu *vcpu)
2263{
2264 static int reported;
2265
2266 if (!reported) {
2267 reported = 1;
2268 printk(KERN_WARNING "kvm: emulating exchange as write\n");
2269 }
2270#ifndef CONFIG_X86_64
 2271 /* a guest's cmpxchg8b has to be emulated atomically */
2272 if (bytes == 8) {
10589a46 2273 gpa_t gpa;
2bacc55c 2274 struct page *page;
c0b49b0d 2275 char *kaddr;
2276 u64 val;
2277
2278 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2279
2280 if (gpa == UNMAPPED_GVA ||
2281 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2282 goto emul_write;
2283
2284 if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
2285 goto emul_write;
2286
2287 val = *(u64 *)new;
72dc67a6 2288
2bacc55c 2289 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
72dc67a6 2290
2291 kaddr = kmap_atomic(page, KM_USER0);
2292 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
2293 kunmap_atomic(kaddr, KM_USER0);
2294 kvm_release_page_dirty(page);
2295 }
3200f405 2296emul_write:
2297#endif
2298
2299 return emulator_write_emulated(addr, new, bytes, vcpu);
2300}
2301
2302static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
2303{
2304 return kvm_x86_ops->get_segment_base(vcpu, seg);
2305}
2306
2307int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
2308{
a7052897 2309 kvm_mmu_invlpg(vcpu, address);
2310 return X86EMUL_CONTINUE;
2311}
2312
2313int emulate_clts(struct kvm_vcpu *vcpu)
2314{
54e445ca 2315 KVMTRACE_0D(CLTS, vcpu, handler);
ad312c7c 2316 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
2317 return X86EMUL_CONTINUE;
2318}
2319
2320int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
2321{
2322 struct kvm_vcpu *vcpu = ctxt->vcpu;
2323
2324 switch (dr) {
2325 case 0 ... 3:
2326 *dest = kvm_x86_ops->get_dr(vcpu, dr);
2327 return X86EMUL_CONTINUE;
2328 default:
b8688d51 2329 pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
2330 return X86EMUL_UNHANDLEABLE;
2331 }
2332}
2333
2334int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
2335{
2336 unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
2337 int exception;
2338
2339 kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
2340 if (exception) {
2341 /* FIXME: better handling */
2342 return X86EMUL_UNHANDLEABLE;
2343 }
2344 return X86EMUL_CONTINUE;
2345}
2346
2347void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
2348{
bbd9b64e 2349 u8 opcodes[4];
5fdbf976 2350 unsigned long rip = kvm_rip_read(vcpu);
2351 unsigned long rip_linear;
2352
f76c710d 2353 if (!printk_ratelimit())
2354 return;
2355
2356 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
2357
77c2002e 2358 kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
2359
2360 printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
2361 context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
2362}
2363EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
2364
14af3f3c 2365static struct x86_emulate_ops emulate_ops = {
77c2002e 2366 .read_std = kvm_read_guest_virt,
2367 .read_emulated = emulator_read_emulated,
2368 .write_emulated = emulator_write_emulated,
2369 .cmpxchg_emulated = emulator_cmpxchg_emulated,
2370};
2371
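/*
 * Reading RAX, RSP and RIP pulls them into the register cache; marking
 * every register dirty afterwards guarantees the cache is written back,
 * which keeps the emulator's direct ->regs accesses coherent.
 */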
2372static void cache_all_regs(struct kvm_vcpu *vcpu)
2373{
2374 kvm_register_read(vcpu, VCPU_REGS_RAX);
2375 kvm_register_read(vcpu, VCPU_REGS_RSP);
2376 kvm_register_read(vcpu, VCPU_REGS_RIP);
2377 vcpu->arch.regs_dirty = ~0;
2378}
2379
2380int emulate_instruction(struct kvm_vcpu *vcpu,
2381 struct kvm_run *run,
2382 unsigned long cr2,
2383 u16 error_code,
571008da 2384 int emulation_type)
bbd9b64e 2385{
310b5d30 2386 int r, shadow_mask;
571008da 2387 struct decode_cache *c;
bbd9b64e 2388
26eef70c 2389 kvm_clear_exception_queue(vcpu);
ad312c7c 2390 vcpu->arch.mmio_fault_cr2 = cr2;
2391 /*
2392 * TODO: fix x86_emulate.c to use guest_read/write_register
 2393 * instead of direct ->regs accesses; this can save hundreds of
 2394 * cycles on Intel for instructions that don't read or change RSP,
 2395 * for example.
2396 */
2397 cache_all_regs(vcpu);
2398
2399 vcpu->mmio_is_write = 0;
ad312c7c 2400 vcpu->arch.pio.string = 0;
bbd9b64e 2401
571008da 2402 if (!(emulation_type & EMULTYPE_NO_DECODE)) {
2403 int cs_db, cs_l;
2404 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
2405
2406 vcpu->arch.emulate_ctxt.vcpu = vcpu;
2407 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
2408 vcpu->arch.emulate_ctxt.mode =
2409 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
2410 ? X86EMUL_MODE_REAL : cs_l
2411 ? X86EMUL_MODE_PROT64 : cs_db
2412 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
2413
ad312c7c 2414 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
2415
 2416 /* Reject instructions other than VMCALL/VMMCALL when
 2417 * trying to emulate an invalid opcode */
2418 c = &vcpu->arch.emulate_ctxt.decode;
2419 if ((emulation_type & EMULTYPE_TRAP_UD) &&
2420 (!(c->twobyte && c->b == 0x01 &&
2421 (c->modrm_reg == 0 || c->modrm_reg == 3) &&
2422 c->modrm_mod == 3 && c->modrm_rm == 1)))
2423 return EMULATE_FAIL;
2424
f2b5756b 2425 ++vcpu->stat.insn_emulation;
bbd9b64e 2426 if (r) {
f2b5756b 2427 ++vcpu->stat.insn_emulation_fail;
2428 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2429 return EMULATE_DONE;
2430 return EMULATE_FAIL;
2431 }
2432 }
2433
2434 if (emulation_type & EMULTYPE_SKIP) {
2435 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
2436 return EMULATE_DONE;
2437 }
2438
ad312c7c 2439 r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
2440 shadow_mask = vcpu->arch.emulate_ctxt.interruptibility;
2441
2442 if (r == 0)
2443 kvm_x86_ops->set_interrupt_shadow(vcpu, shadow_mask);
bbd9b64e 2444
ad312c7c 2445 if (vcpu->arch.pio.string)
2446 return EMULATE_DO_MMIO;
2447
2448 if ((r || vcpu->mmio_is_write) && run) {
2449 run->exit_reason = KVM_EXIT_MMIO;
2450 run->mmio.phys_addr = vcpu->mmio_phys_addr;
2451 memcpy(run->mmio.data, vcpu->mmio_data, 8);
2452 run->mmio.len = vcpu->mmio_size;
2453 run->mmio.is_write = vcpu->mmio_is_write;
2454 }
2455
2456 if (r) {
2457 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2458 return EMULATE_DONE;
2459 if (!vcpu->mmio_needed) {
2460 kvm_report_emulation_failure(vcpu, "mmio");
2461 return EMULATE_FAIL;
2462 }
2463 return EMULATE_DO_MMIO;
2464 }
2465
ad312c7c 2466 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
2467
2468 if (vcpu->mmio_is_write) {
2469 vcpu->mmio_needed = 0;
2470 return EMULATE_DO_MMIO;
2471 }
2472
2473 return EMULATE_DONE;
2474}
2475EXPORT_SYMBOL_GPL(emulate_instruction);
2476
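/*
 * Shuttle data between the per-vcpu PIO scratch page and the guest
 * buffer: data is written to guest memory for IN, read from it for OUT.
 */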
2477static int pio_copy_data(struct kvm_vcpu *vcpu)
2478{
ad312c7c 2479 void *p = vcpu->arch.pio_data;
0f346074 2480 gva_t q = vcpu->arch.pio.guest_gva;
de7d789a 2481 unsigned bytes;
0f346074 2482 int ret;
de7d789a 2483
2484 bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
2485 if (vcpu->arch.pio.in)
0f346074 2486 ret = kvm_write_guest_virt(q, p, bytes, vcpu);
de7d789a 2487 else
2488 ret = kvm_read_guest_virt(q, p, bytes, vcpu);
2489 return ret;
2490}
2491
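/*
 * Finish an in-flight port I/O: fold IN data back into RAX (or into
 * guest memory for string ops) and step RCX/RSI/RDI the way rep
 * ins/outs would.
 */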
2492int complete_pio(struct kvm_vcpu *vcpu)
2493{
ad312c7c 2494 struct kvm_pio_request *io = &vcpu->arch.pio;
2495 long delta;
2496 int r;
5fdbf976 2497 unsigned long val;
2498
2499 if (!io->string) {
2500 if (io->in) {
2501 val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2502 memcpy(&val, vcpu->arch.pio_data, io->size);
2503 kvm_register_write(vcpu, VCPU_REGS_RAX, val);
2504 }
2505 } else {
2506 if (io->in) {
2507 r = pio_copy_data(vcpu);
5fdbf976 2508 if (r)
de7d789a 2509 return r;
2510 }
2511
2512 delta = 1;
2513 if (io->rep) {
2514 delta *= io->cur_count;
2515 /*
2516 * The size of the register should really depend on
2517 * current address size.
2518 */
2519 val = kvm_register_read(vcpu, VCPU_REGS_RCX);
2520 val -= delta;
2521 kvm_register_write(vcpu, VCPU_REGS_RCX, val);
2522 }
2523 if (io->down)
2524 delta = -delta;
2525 delta *= io->size;
2526 if (io->in) {
2527 val = kvm_register_read(vcpu, VCPU_REGS_RDI);
2528 val += delta;
2529 kvm_register_write(vcpu, VCPU_REGS_RDI, val);
2530 } else {
2531 val = kvm_register_read(vcpu, VCPU_REGS_RSI);
2532 val += delta;
2533 kvm_register_write(vcpu, VCPU_REGS_RSI, val);
2534 }
2535 }
2536
2537 io->count -= io->cur_count;
2538 io->cur_count = 0;
2539
2540 return 0;
2541}
2542
2543static void kernel_pio(struct kvm_io_device *pio_dev,
2544 struct kvm_vcpu *vcpu,
2545 void *pd)
2546{
2547 /* TODO: String I/O for in kernel device */
2548
2549 mutex_lock(&vcpu->kvm->lock);
2550 if (vcpu->arch.pio.in)
2551 kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
2552 vcpu->arch.pio.size,
2553 pd);
2554 else
2555 kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
2556 vcpu->arch.pio.size,
2557 pd);
2558 mutex_unlock(&vcpu->kvm->lock);
2559}
2560
2561static void pio_string_write(struct kvm_io_device *pio_dev,
2562 struct kvm_vcpu *vcpu)
2563{
2564 struct kvm_pio_request *io = &vcpu->arch.pio;
2565 void *pd = vcpu->arch.pio_data;
2566 int i;
2567
2568 mutex_lock(&vcpu->kvm->lock);
2569 for (i = 0; i < io->cur_count; i++) {
2570 kvm_iodevice_write(pio_dev, io->port,
2571 io->size,
2572 pd);
2573 pd += io->size;
2574 }
2575 mutex_unlock(&vcpu->kvm->lock);
2576}
2577
2578static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
2579 gpa_t addr, int len,
2580 int is_write)
de7d789a 2581{
92760499 2582 return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr, len, is_write);
2583}
2584
2585int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2586 int size, unsigned port)
2587{
2588 struct kvm_io_device *pio_dev;
5fdbf976 2589 unsigned long val;
2590
2591 vcpu->run->exit_reason = KVM_EXIT_IO;
2592 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
ad312c7c 2593 vcpu->run->io.size = vcpu->arch.pio.size = size;
de7d789a 2594 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
2595 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
2596 vcpu->run->io.port = vcpu->arch.pio.port = port;
2597 vcpu->arch.pio.in = in;
2598 vcpu->arch.pio.string = 0;
2599 vcpu->arch.pio.down = 0;
ad312c7c 2600 vcpu->arch.pio.rep = 0;
de7d789a 2601
2602 if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2603 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2604 handler);
2605 else
2606 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2607 handler);
2608
2609 val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2610 memcpy(vcpu->arch.pio_data, &val, 4);
de7d789a 2611
92760499 2612 pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in);
de7d789a 2613 if (pio_dev) {
ad312c7c 2614 kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
2615 complete_pio(vcpu);
2616 return 1;
2617 }
2618 return 0;
2619}
2620EXPORT_SYMBOL_GPL(kvm_emulate_pio);
2621
2622int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2623 int size, unsigned long count, int down,
2624 gva_t address, int rep, unsigned port)
2625{
2626 unsigned now, in_page;
0f346074 2627 int ret = 0;
2628 struct kvm_io_device *pio_dev;
2629
2630 vcpu->run->exit_reason = KVM_EXIT_IO;
2631 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
ad312c7c 2632 vcpu->run->io.size = vcpu->arch.pio.size = size;
de7d789a 2633 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
2634 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
2635 vcpu->run->io.port = vcpu->arch.pio.port = port;
2636 vcpu->arch.pio.in = in;
2637 vcpu->arch.pio.string = 1;
2638 vcpu->arch.pio.down = down;
ad312c7c 2639 vcpu->arch.pio.rep = rep;
de7d789a 2640
2641 if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2642 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2643 handler);
2644 else
2645 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2646 handler);
2647
2648 if (!count) {
2649 kvm_x86_ops->skip_emulated_instruction(vcpu);
2650 return 1;
2651 }
2652
2653 if (!down)
2654 in_page = PAGE_SIZE - offset_in_page(address);
2655 else
2656 in_page = offset_in_page(address) + size;
2657 now = min(count, (unsigned long)in_page / size);
0f346074 2658 if (!now)
de7d789a 2659 now = 1;
2660 if (down) {
2661 /*
2662 * String I/O in reverse. Yuck. Kill the guest, fix later.
2663 */
2664 pr_unimpl(vcpu, "guest string pio down\n");
c1a5d4f9 2665 kvm_inject_gp(vcpu, 0);
2666 return 1;
2667 }
2668 vcpu->run->io.count = now;
ad312c7c 2669 vcpu->arch.pio.cur_count = now;
de7d789a 2670
ad312c7c 2671 if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
2672 kvm_x86_ops->skip_emulated_instruction(vcpu);
2673
0f346074 2674 vcpu->arch.pio.guest_gva = address;
de7d789a 2675
2676 pio_dev = vcpu_find_pio_dev(vcpu, port,
2677 vcpu->arch.pio.cur_count,
2678 !vcpu->arch.pio.in);
ad312c7c 2679 if (!vcpu->arch.pio.in) {
2680 /* string PIO write */
2681 ret = pio_copy_data(vcpu);
2682 if (ret == X86EMUL_PROPAGATE_FAULT) {
2683 kvm_inject_gp(vcpu, 0);
2684 return 1;
2685 }
2686 if (ret == 0 && pio_dev) {
2687 pio_string_write(pio_dev, vcpu);
2688 complete_pio(vcpu);
ad312c7c 2689 if (vcpu->arch.pio.count == 0)
2690 ret = 1;
2691 }
2692 } else if (pio_dev)
2693 pr_unimpl(vcpu, "no string pio read support yet, "
2694 "port %x size %d count %ld\n",
2695 port, size, count);
2696
2697 return ret;
2698}
2699EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
2700
2701static void bounce_off(void *info)
2702{
2703 /* nothing */
2704}
2705
2706static unsigned int ref_freq;
2707static unsigned long tsc_khz_ref;
2708
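/*
 * cpufreq transition hook: rescale the per-cpu TSC frequency and ask
 * every vcpu on the affected cpu to refresh its kvmclock, kicking
 * remote cpus out of guest mode when necessary.
 */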
2709static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
2710 void *data)
2711{
2712 struct cpufreq_freqs *freq = data;
2713 struct kvm *kvm;
2714 struct kvm_vcpu *vcpu;
2715 int i, send_ipi = 0;
2716
2717 if (!ref_freq)
2718 ref_freq = freq->old;
2719
2720 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
2721 return 0;
2722 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
2723 return 0;
2724 per_cpu(cpu_tsc_khz, freq->cpu) = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
2725
2726 spin_lock(&kvm_lock);
2727 list_for_each_entry(kvm, &vm_list, vm_list) {
2728 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
2729 vcpu = kvm->vcpus[i];
2730 if (!vcpu)
2731 continue;
2732 if (vcpu->cpu != freq->cpu)
2733 continue;
2734 if (!kvm_request_guest_time_update(vcpu))
2735 continue;
2736 if (vcpu->cpu != smp_processor_id())
2737 send_ipi++;
2738 }
2739 }
2740 spin_unlock(&kvm_lock);
2741
2742 if (freq->old < freq->new && send_ipi) {
2743 /*
 2744 * We are scaling the frequency up. We must make sure the
 2745 * guest doesn't see stale kvmclock values while running at
 2746 * the new frequency; otherwise we risk the guest seeing
 2747 * time go backwards.
2748 *
2749 * In case we update the frequency for another cpu
2750 * (which might be in guest context) send an interrupt
2751 * to kick the cpu out of guest context. Next time
2752 * guest context is entered kvmclock will be updated,
2753 * so the guest will not see stale values.
2754 */
2755 smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
2756 }
2757 return 0;
2758}
2759
2760static struct notifier_block kvmclock_cpufreq_notifier_block = {
2761 .notifier_call = kvmclock_cpufreq_notifier
2762};
2763
f8c16bba 2764int kvm_arch_init(void *opaque)
043405e1 2765{
c8076604 2766 int r, cpu;
2767 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
2768
2769 if (kvm_x86_ops) {
2770 printk(KERN_ERR "kvm: already loaded the other module\n");
2771 r = -EEXIST;
2772 goto out;
2773 }
2774
2775 if (!ops->cpu_has_kvm_support()) {
2776 printk(KERN_ERR "kvm: no hardware support\n");
2777 r = -EOPNOTSUPP;
2778 goto out;
2779 }
2780 if (ops->disabled_by_bios()) {
2781 printk(KERN_ERR "kvm: disabled by bios\n");
2782 r = -EOPNOTSUPP;
2783 goto out;
2784 }
2785
2786 r = kvm_mmu_module_init();
2787 if (r)
2788 goto out;
2789
2790 kvm_init_msr_list();
2791
f8c16bba 2792 kvm_x86_ops = ops;
56c6d28a 2793 kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
2794 kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
2795 kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
4b12f0de 2796 PT_DIRTY_MASK, PT64_NX_MASK, 0);
2797
2798 for_each_possible_cpu(cpu)
2799 per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
2800 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
2801 tsc_khz_ref = tsc_khz;
2802 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
2803 CPUFREQ_TRANSITION_NOTIFIER);
2804 }
2805
f8c16bba 2806 return 0;
2807
2808out:
56c6d28a 2809 return r;
043405e1 2810}
8776e519 2811
2812void kvm_arch_exit(void)
2813{
2814 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
2815 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
2816 CPUFREQ_TRANSITION_NOTIFIER);
f8c16bba 2817 kvm_x86_ops = NULL;
2818 kvm_mmu_module_exit();
2819}
f8c16bba 2820
2821int kvm_emulate_halt(struct kvm_vcpu *vcpu)
2822{
2823 ++vcpu->stat.halt_exits;
2714d1d3 2824 KVMTRACE_0D(HLT, vcpu, handler);
8776e519 2825 if (irqchip_in_kernel(vcpu->kvm)) {
a4535290 2826 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
2827 return 1;
2828 } else {
2829 vcpu->run->exit_reason = KVM_EXIT_HLT;
2830 return 0;
2831 }
2832}
2833EXPORT_SYMBOL_GPL(kvm_emulate_halt);
2834
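/*
 * A 32-bit guest passes a 64-bit hypercall argument in two registers;
 * recombine it here (a0 holds the low 32 bits, a1 the high 32 bits).
 */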
2835static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
2836 unsigned long a1)
2837{
2838 if (is_long_mode(vcpu))
2839 return a0;
2840 else
2841 return a0 | ((gpa_t)a1 << 32);
2842}
2843
2844int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2845{
2846 unsigned long nr, a0, a1, a2, a3, ret;
2f333bcb 2847 int r = 1;
8776e519 2848
2849 nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
2850 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
2851 a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
2852 a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
2853 a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
8776e519 2854
2855 KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
2856
2857 if (!is_long_mode(vcpu)) {
2858 nr &= 0xFFFFFFFF;
2859 a0 &= 0xFFFFFFFF;
2860 a1 &= 0xFFFFFFFF;
2861 a2 &= 0xFFFFFFFF;
2862 a3 &= 0xFFFFFFFF;
2863 }
2864
2865 switch (nr) {
2866 case KVM_HC_VAPIC_POLL_IRQ:
2867 ret = 0;
2868 break;
2869 case KVM_HC_MMU_OP:
2870 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
2871 break;
2872 default:
2873 ret = -KVM_ENOSYS;
2874 break;
2875 }
5fdbf976 2876 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
f11c3a8d 2877 ++vcpu->stat.hypercalls;
2f333bcb 2878 return r;
2879}
2880EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
2881
2882int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
2883{
2884 char instruction[3];
2885 int ret = 0;
5fdbf976 2886 unsigned long rip = kvm_rip_read(vcpu);
8776e519 2887
2888
2889 /*
 2890 * Blow out the MMU so that no other VCPU keeps an active mapping;
 2891 * this ensures the updated hypercall appears atomically across all
2892 * VCPUs.
2893 */
2894 kvm_mmu_zap_all(vcpu->kvm);
2895
8776e519 2896 kvm_x86_ops->patch_hypercall(vcpu, instruction);
5fdbf976 2897 if (emulator_write_emulated(rip, instruction, 3, vcpu)
2898 != X86EMUL_CONTINUE)
2899 ret = -EFAULT;
2900
2901 return ret;
2902}
2903
2904static u64 mk_cr_64(u64 curr_cr, u32 new_val)
2905{
2906 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
2907}
2908
2909void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2910{
2911 struct descriptor_table dt = { limit, base };
2912
2913 kvm_x86_ops->set_gdt(vcpu, &dt);
2914}
2915
2916void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2917{
2918 struct descriptor_table dt = { limit, base };
2919
2920 kvm_x86_ops->set_idt(vcpu, &dt);
2921}
2922
2923void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
2924 unsigned long *rflags)
2925{
2d3ad1f4 2926 kvm_lmsw(vcpu, msw);
2927 *rflags = kvm_x86_ops->get_rflags(vcpu);
2928}
2929
2930unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
2931{
2932 unsigned long value;
2933
2934 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2935 switch (cr) {
2936 case 0:
2937 value = vcpu->arch.cr0;
2938 break;
8776e519 2939 case 2:
2940 value = vcpu->arch.cr2;
2941 break;
8776e519 2942 case 3:
2943 value = vcpu->arch.cr3;
2944 break;
8776e519 2945 case 4:
2946 value = vcpu->arch.cr4;
2947 break;
152ff9be 2948 case 8:
2949 value = kvm_get_cr8(vcpu);
2950 break;
8776e519 2951 default:
b8688d51 2952 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
2953 return 0;
2954 }
2955 KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
2956 (u32)((u64)value >> 32), handler);
2957
2958 return value;
2959}
2960
2961void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
2962 unsigned long *rflags)
2963{
2964 KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
2965 (u32)((u64)val >> 32), handler);
2966
2967 switch (cr) {
2968 case 0:
2d3ad1f4 2969 kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
2970 *rflags = kvm_x86_ops->get_rflags(vcpu);
2971 break;
2972 case 2:
ad312c7c 2973 vcpu->arch.cr2 = val;
2974 break;
2975 case 3:
2d3ad1f4 2976 kvm_set_cr3(vcpu, val);
2977 break;
2978 case 4:
2d3ad1f4 2979 kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
8776e519 2980 break;
152ff9be 2981 case 8:
2d3ad1f4 2982 kvm_set_cr8(vcpu, val & 0xfUL);
152ff9be 2983 break;
8776e519 2984 default:
b8688d51 2985 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
2986 }
2987}
2988
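/*
 * Stateful CPUID functions return different data on each invocation,
 * so the READ_NEXT flag is rotated to the next entry with the same
 * function number.
 */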
2989static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
2990{
2991 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
2992 int j, nent = vcpu->arch.cpuid_nent;
2993
2994 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
2995 /* when no next entry is found, the current entry[i] is reselected */
0fdf8e59 2996 for (j = (i + 1) % nent; ; j = (j + 1) % nent) {
ad312c7c 2997 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
2998 if (ej->function == e->function) {
2999 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
3000 return j;
3001 }
3002 }
3003 return 0; /* silence gcc, even though control never reaches here */
3004}
3005
3006/* find an entry with matching function, matching index (if needed), and that
3007 * should be read next (if it's stateful) */
3008static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
3009 u32 function, u32 index)
3010{
3011 if (e->function != function)
3012 return 0;
3013 if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
3014 return 0;
3015 if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
19355475 3016 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
3017 return 0;
3018 return 1;
3019}
3020
3021struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
3022 u32 function, u32 index)
3023{
3024 int i;
d8017474 3025 struct kvm_cpuid_entry2 *best = NULL;
8776e519 3026
ad312c7c 3027 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
3028 struct kvm_cpuid_entry2 *e;
3029
ad312c7c 3030 e = &vcpu->arch.cpuid_entries[i];
3031 if (is_matching_cpuid_entry(e, function, index)) {
3032 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
3033 move_to_next_stateful_cpuid_entry(vcpu, i);
3034 best = e;
3035 break;
3036 }
3037 /*
3038 * Both basic or both extended?
3039 */
3040 if (((e->function ^ function) & 0x80000000) == 0)
3041 if (!best || e->function > best->function)
3042 best = e;
3043 }
3044 return best;
3045}
3046
3047int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
3048{
3049 struct kvm_cpuid_entry2 *best;
3050
3051 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
3052 if (best)
3053 return best->eax & 0xff;
3054 return 36;
3055}
3056
3057void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
3058{
3059 u32 function, index;
3060 struct kvm_cpuid_entry2 *best;
3061
3062 function = kvm_register_read(vcpu, VCPU_REGS_RAX);
3063 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
3064 kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
3065 kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
3066 kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
3067 kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
3068 best = kvm_find_cpuid_entry(vcpu, function, index);
8776e519 3069 if (best) {
3070 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
3071 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
3072 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
3073 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
8776e519 3074 }
8776e519 3075 kvm_x86_ops->skip_emulated_instruction(vcpu);
2714d1d3 3076 KVMTRACE_5D(CPUID, vcpu, function,
3077 (u32)kvm_register_read(vcpu, VCPU_REGS_RAX),
3078 (u32)kvm_register_read(vcpu, VCPU_REGS_RBX),
3079 (u32)kvm_register_read(vcpu, VCPU_REGS_RCX),
3080 (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler);
3081}
3082EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
d0752060 3083
3084/*
 3085 * Check whether userspace requested an interrupt window and whether
 3086 * the interrupt window is open.
3087 *
3088 * No need to exit to userspace if we already have an interrupt queued.
3089 */
3090static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
3091 struct kvm_run *kvm_run)
3092{
8061823a 3093 return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
b6c7a5dc 3094 kvm_run->request_interrupt_window &&
5df56646 3095 kvm_arch_interrupt_allowed(vcpu));
b6c7a5dc
HB
3096}
3097
3098static void post_kvm_run_save(struct kvm_vcpu *vcpu,
3099 struct kvm_run *kvm_run)
3100{
3101 kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
2d3ad1f4 3102 kvm_run->cr8 = kvm_get_cr8(vcpu);
b6c7a5dc 3103 kvm_run->apic_base = kvm_get_apic_base(vcpu);
4531220b 3104 if (irqchip_in_kernel(vcpu->kvm))
b6c7a5dc 3105 kvm_run->ready_for_interrupt_injection = 1;
4531220b 3106 else
b6c7a5dc 3107 kvm_run->ready_for_interrupt_injection =
3108 kvm_arch_interrupt_allowed(vcpu) &&
3109 !kvm_cpu_has_interrupt(vcpu) &&
3110 !kvm_event_needs_reinjection(vcpu);
3111}
3112
3113static void vapic_enter(struct kvm_vcpu *vcpu)
3114{
3115 struct kvm_lapic *apic = vcpu->arch.apic;
3116 struct page *page;
3117
3118 if (!apic || !apic->vapic_addr)
3119 return;
3120
3121 page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
3122
3123 vcpu->arch.apic->vapic_page = page;
3124}
3125
3126static void vapic_exit(struct kvm_vcpu *vcpu)
3127{
3128 struct kvm_lapic *apic = vcpu->arch.apic;
3129
3130 if (!apic || !apic->vapic_addr)
3131 return;
3132
f8b78fa3 3133 down_read(&vcpu->kvm->slots_lock);
3134 kvm_release_page_dirty(apic->vapic_page);
3135 mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
f8b78fa3 3136 up_read(&vcpu->kvm->slots_lock);
3137}
3138
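/*
 * Derive the TPR threshold from the highest pending interrupt and let
 * the backend adjust its CR8 intercept accordingly.
 */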
3139static void update_cr8_intercept(struct kvm_vcpu *vcpu)
3140{
3141 int max_irr, tpr;
3142
3143 if (!kvm_x86_ops->update_cr8_intercept)
3144 return;
3145
3146 max_irr = kvm_lapic_find_highest_irr(vcpu);
3147
3148 if (max_irr != -1)
3149 max_irr >>= 4;
3150
3151 tpr = kvm_lapic_get_cr8(vcpu);
3152
3153 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
3154}
3155
3156static void inject_irq(struct kvm_vcpu *vcpu)
3157{
3158 /* try to reinject previous events if any */
3159 if (vcpu->arch.nmi_injected) {
3160 kvm_x86_ops->set_nmi(vcpu);
3161 return;
3162 }
3163
3164 if (vcpu->arch.interrupt.pending) {
3165 kvm_x86_ops->set_irq(vcpu, vcpu->arch.interrupt.nr);
3166 return;
3167 }
3168
3169 /* try to inject new event if pending */
3170 if (vcpu->arch.nmi_pending) {
3171 if (kvm_x86_ops->nmi_allowed(vcpu)) {
3172 vcpu->arch.nmi_pending = false;
3173 vcpu->arch.nmi_injected = true;
3174 kvm_x86_ops->set_nmi(vcpu);
3175 }
3176 } else if (kvm_cpu_has_interrupt(vcpu)) {
3177 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
3178 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
3179 kvm_x86_ops->set_irq(vcpu, vcpu->arch.interrupt.nr);
3180 }
3181 }
3182}
3183
3184static void inject_pending_irq(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3185{
3186 bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
3187 kvm_run->request_interrupt_window;
3188
3189 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
2809f5d2 3190 kvm_x86_ops->set_interrupt_shadow(vcpu, 0);
3191
3192 inject_irq(vcpu);
3193
3194 /* enable NMI/IRQ window open exits if needed */
3195 if (vcpu->arch.nmi_pending)
3196 kvm_x86_ops->enable_nmi_window(vcpu);
3197 else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
3198 kvm_x86_ops->enable_irq_window(vcpu);
3199}
3200
d7690175 3201static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3202{
3203 int r;
3204
3205 if (vcpu->requests)
3206 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
3207 kvm_mmu_unload(vcpu);
3208
3209 r = kvm_mmu_reload(vcpu);
3210 if (unlikely(r))
3211 goto out;
3212
3213 if (vcpu->requests) {
3214 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
2f599714 3215 __kvm_migrate_timers(vcpu);
3216 if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
3217 kvm_write_guest_time(vcpu);
3218 if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
3219 kvm_mmu_sync_roots(vcpu);
3220 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
3221 kvm_x86_ops->tlb_flush(vcpu);
3222 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
3223 &vcpu->requests)) {
3224 kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
3225 r = 0;
3226 goto out;
3227 }
3228 if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
3229 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
3230 r = 0;
3231 goto out;
3232 }
2f52d58c 3233 }
b93463aa 3234
3235 preempt_disable();
3236
3237 kvm_x86_ops->prepare_guest_switch(vcpu);
3238 kvm_load_guest_fpu(vcpu);
3239
3240 local_irq_disable();
3241
3242 clear_bit(KVM_REQ_KICK, &vcpu->requests);
3243 smp_mb__after_clear_bit();
3244
d7690175 3245 if (vcpu->requests || need_resched() || signal_pending(current)) {
3246 local_irq_enable();
3247 preempt_enable();
3248 r = 1;
3249 goto out;
3250 }
3251
ad312c7c 3252 if (vcpu->arch.exception.pending)
298101da 3253 __queue_exception(vcpu);
eb9774f0 3254 else
95ba8273 3255 inject_pending_irq(vcpu, kvm_run);
b6c7a5dc 3256
3257 if (kvm_lapic_enabled(vcpu)) {
3258 if (!vcpu->arch.apic->vapic_addr)
3259 update_cr8_intercept(vcpu);
3260 else
3261 kvm_lapic_sync_to_vapic(vcpu);
3262 }
b93463aa 3263
3264 up_read(&vcpu->kvm->slots_lock);
3265
3266 kvm_guest_enter();
3267
3268 get_debugreg(vcpu->arch.host_dr6, 6);
3269 get_debugreg(vcpu->arch.host_dr7, 7);
3270 if (unlikely(vcpu->arch.switch_db_regs)) {
3271 get_debugreg(vcpu->arch.host_db[0], 0);
3272 get_debugreg(vcpu->arch.host_db[1], 1);
3273 get_debugreg(vcpu->arch.host_db[2], 2);
3274 get_debugreg(vcpu->arch.host_db[3], 3);
3275
3276 set_debugreg(0, 7);
3277 set_debugreg(vcpu->arch.eff_db[0], 0);
3278 set_debugreg(vcpu->arch.eff_db[1], 1);
3279 set_debugreg(vcpu->arch.eff_db[2], 2);
3280 set_debugreg(vcpu->arch.eff_db[3], 3);
3281 }
b6c7a5dc 3282
2714d1d3 3283 KVMTRACE_0D(VMENTRY, vcpu, entryexit);
3284 kvm_x86_ops->run(vcpu, kvm_run);
3285
3286 if (unlikely(vcpu->arch.switch_db_regs)) {
3287 set_debugreg(0, 7);
3288 set_debugreg(vcpu->arch.host_db[0], 0);
3289 set_debugreg(vcpu->arch.host_db[1], 1);
3290 set_debugreg(vcpu->arch.host_db[2], 2);
3291 set_debugreg(vcpu->arch.host_db[3], 3);
3292 }
3293 set_debugreg(vcpu->arch.host_dr6, 6);
3294 set_debugreg(vcpu->arch.host_dr7, 7);
3295
32f88400 3296 set_bit(KVM_REQ_KICK, &vcpu->requests);
3297 local_irq_enable();
3298
3299 ++vcpu->stat.exits;
3300
3301 /*
3302 * We must have an instruction between local_irq_enable() and
3303 * kvm_guest_exit(), so the timer interrupt isn't delayed by
3304 * the interrupt shadow. The stat.exits increment will do nicely.
3305 * But we need to prevent reordering, hence this barrier():
3306 */
3307 barrier();
3308
3309 kvm_guest_exit();
3310
3311 preempt_enable();
3312
3313 down_read(&vcpu->kvm->slots_lock);
3314
3315 /*
3316 * Profile KVM exit RIPs:
3317 */
3318 if (unlikely(prof_on == KVM_PROFILING)) {
3319 unsigned long rip = kvm_rip_read(vcpu);
3320 profile_hit(KVM_PROFILING, (void *)rip);
3321 }
3322
298101da 3323
3324 kvm_lapic_sync_from_vapic(vcpu);
3325
b6c7a5dc 3326 r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
3327out:
3328 return r;
3329}
b6c7a5dc 3330
09cec754 3331
3332static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3333{
3334 int r;
3335
3336 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
3337 pr_debug("vcpu %d received sipi with vector # %x\n",
3338 vcpu->vcpu_id, vcpu->arch.sipi_vector);
d7690175 3339 kvm_lapic_reset(vcpu);
5f179287 3340 r = kvm_arch_vcpu_reset(vcpu);
3341 if (r)
3342 return r;
3343 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3344 }
3345
3346 down_read(&vcpu->kvm->slots_lock);
3347 vapic_enter(vcpu);
3348
3349 r = 1;
3350 while (r > 0) {
af2152f5 3351 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
3352 r = vcpu_enter_guest(vcpu, kvm_run);
3353 else {
3354 up_read(&vcpu->kvm->slots_lock);
3355 kvm_vcpu_block(vcpu);
3356 down_read(&vcpu->kvm->slots_lock);
3357 if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
3358 {
3359 switch(vcpu->arch.mp_state) {
3360 case KVM_MP_STATE_HALTED:
d7690175 3361 vcpu->arch.mp_state =
3362 KVM_MP_STATE_RUNNABLE;
3363 case KVM_MP_STATE_RUNNABLE:
3364 break;
3365 case KVM_MP_STATE_SIPI_RECEIVED:
3366 default:
3367 r = -EINTR;
3368 break;
3369 }
3370 }
3371 }
3372
3373 if (r <= 0)
3374 break;
3375
3376 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
3377 if (kvm_cpu_has_pending_timer(vcpu))
3378 kvm_inject_pending_timer_irqs(vcpu);
3379
3380 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
3381 r = -EINTR;
3382 kvm_run->exit_reason = KVM_EXIT_INTR;
3383 ++vcpu->stat.request_irq_exits;
3384 }
3385 if (signal_pending(current)) {
3386 r = -EINTR;
3387 kvm_run->exit_reason = KVM_EXIT_INTR;
3388 ++vcpu->stat.signal_exits;
3389 }
3390 if (need_resched()) {
3391 up_read(&vcpu->kvm->slots_lock);
3392 kvm_resched(vcpu);
3393 down_read(&vcpu->kvm->slots_lock);
d7690175 3394 }
3395 }
3396
d7690175 3397 up_read(&vcpu->kvm->slots_lock);
3398 post_kvm_run_save(vcpu, kvm_run);
3399
3400 vapic_exit(vcpu);
3401
3402 return r;
3403}
3404
3405int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3406{
3407 int r;
3408 sigset_t sigsaved;
3409
3410 vcpu_load(vcpu);
3411
3412 if (vcpu->sigset_active)
3413 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
3414
a4535290 3415 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
b6c7a5dc 3416 kvm_vcpu_block(vcpu);
d7690175 3417 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
3418 r = -EAGAIN;
3419 goto out;
3420 }
3421
3422 /* re-sync apic's tpr */
3423 if (!irqchip_in_kernel(vcpu->kvm))
2d3ad1f4 3424 kvm_set_cr8(vcpu, kvm_run->cr8);
b6c7a5dc 3425
ad312c7c 3426 if (vcpu->arch.pio.cur_count) {
3427 r = complete_pio(vcpu);
3428 if (r)
3429 goto out;
3430 }
 3431#ifdef CONFIG_HAS_IOMEM
3432 if (vcpu->mmio_needed) {
3433 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
3434 vcpu->mmio_read_completed = 1;
3435 vcpu->mmio_needed = 0;
3436
3437 down_read(&vcpu->kvm->slots_lock);
b6c7a5dc 3438 r = emulate_instruction(vcpu, kvm_run,
3439 vcpu->arch.mmio_fault_cr2, 0,
3440 EMULTYPE_NO_DECODE);
3200f405 3441 up_read(&vcpu->kvm->slots_lock);
3442 if (r == EMULATE_DO_MMIO) {
3443 /*
3444 * Read-modify-write. Back to userspace.
3445 */
3446 r = 0;
3447 goto out;
3448 }
3449 }
3450#endif
3451 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
3452 kvm_register_write(vcpu, VCPU_REGS_RAX,
3453 kvm_run->hypercall.ret);
3454
3455 r = __vcpu_run(vcpu, kvm_run);
3456
3457out:
3458 if (vcpu->sigset_active)
3459 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
3460
3461 vcpu_put(vcpu);
3462 return r;
3463}
3464
3465int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3466{
3467 vcpu_load(vcpu);
3468
3469 regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
3470 regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
3471 regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
3472 regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
3473 regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
3474 regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
3475 regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
3476 regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
b6c7a5dc 3477#ifdef CONFIG_X86_64
3478 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
3479 regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
3480 regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
3481 regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
3482 regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
3483 regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
3484 regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
3485 regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
3486#endif
3487
5fdbf976 3488 regs->rip = kvm_rip_read(vcpu);
b6c7a5dc
HB
3489 regs->rflags = kvm_x86_ops->get_rflags(vcpu);
3490
3491 /*
3492 * Don't leak debug flags in case they were set for guest debugging
3493 */
d0bfb940 3494 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
3495 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
3496
3497 vcpu_put(vcpu);
3498
3499 return 0;
3500}
3501
3502int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3503{
3504 vcpu_load(vcpu);
3505
3506 kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
3507 kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
3508 kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
3509 kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
3510 kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
3511 kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
3512 kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
3513 kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
b6c7a5dc 3514#ifdef CONFIG_X86_64
3515 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
3516 kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
3517 kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
3518 kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
3519 kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
3520 kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
3521 kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
3522 kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
3523
3524#endif
3525
5fdbf976 3526 kvm_rip_write(vcpu, regs->rip);
3527 kvm_x86_ops->set_rflags(vcpu, regs->rflags);
3528
b6c7a5dc 3529
3530 vcpu->arch.exception.pending = false;
3531
3532 vcpu_put(vcpu);
3533
3534 return 0;
3535}
3536
3537void kvm_get_segment(struct kvm_vcpu *vcpu,
3538 struct kvm_segment *var, int seg)
b6c7a5dc 3539{
14af3f3c 3540 kvm_x86_ops->get_segment(vcpu, var, seg);
3541}
3542
3543void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3544{
3545 struct kvm_segment cs;
3546
3e6e0aab 3547 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
3548 *db = cs.db;
3549 *l = cs.l;
3550}
3551EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
3552
3553int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3554 struct kvm_sregs *sregs)
3555{
3556 struct descriptor_table dt;
3557
3558 vcpu_load(vcpu);
3559
3560 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
3561 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
3562 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
3563 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
3564 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
3565 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
b6c7a5dc 3566
3567 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
3568 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
3569
3570 kvm_x86_ops->get_idt(vcpu, &dt);
3571 sregs->idt.limit = dt.limit;
3572 sregs->idt.base = dt.base;
3573 kvm_x86_ops->get_gdt(vcpu, &dt);
3574 sregs->gdt.limit = dt.limit;
3575 sregs->gdt.base = dt.base;
3576
3577 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
3578 sregs->cr0 = vcpu->arch.cr0;
3579 sregs->cr2 = vcpu->arch.cr2;
3580 sregs->cr3 = vcpu->arch.cr3;
3581 sregs->cr4 = vcpu->arch.cr4;
2d3ad1f4 3582 sregs->cr8 = kvm_get_cr8(vcpu);
ad312c7c 3583 sregs->efer = vcpu->arch.shadow_efer;
3584 sregs->apic_base = kvm_get_apic_base(vcpu);
3585
16d7a191 3586 if (irqchip_in_kernel(vcpu->kvm))
3587 memset(sregs->interrupt_bitmap, 0,
3588 sizeof sregs->interrupt_bitmap);
16d7a191 3589 else
ad312c7c 3590 memcpy(sregs->interrupt_bitmap, vcpu->arch.irq_pending,
3591 sizeof sregs->interrupt_bitmap);
3592
3593 if (vcpu->arch.interrupt.pending)
3594 set_bit(vcpu->arch.interrupt.nr,
3595 (unsigned long *)sregs->interrupt_bitmap);
16d7a191 3596
3597 vcpu_put(vcpu);
3598
3599 return 0;
3600}
3601
3602int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3603 struct kvm_mp_state *mp_state)
3604{
3605 vcpu_load(vcpu);
3606 mp_state->mp_state = vcpu->arch.mp_state;
3607 vcpu_put(vcpu);
3608 return 0;
3609}
3610
3611int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3612 struct kvm_mp_state *mp_state)
3613{
3614 vcpu_load(vcpu);
3615 vcpu->arch.mp_state = mp_state->mp_state;
3616 vcpu_put(vcpu);
3617 return 0;
3618}
3619
3e6e0aab 3620static void kvm_set_segment(struct kvm_vcpu *vcpu,
3621 struct kvm_segment *var, int seg)
3622{
14af3f3c 3623 kvm_x86_ops->set_segment(vcpu, var, seg);
3624}
3625
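/*
 * Unpack a descriptor-table entry into KVM's flat kvm_segment layout;
 * with the granularity bit set the limit is in 4K units, hence the
 * shift by 12.
 */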
3626static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
3627 struct kvm_segment *kvm_desct)
3628{
3629 kvm_desct->base = seg_desc->base0;
3630 kvm_desct->base |= seg_desc->base1 << 16;
3631 kvm_desct->base |= seg_desc->base2 << 24;
3632 kvm_desct->limit = seg_desc->limit0;
3633 kvm_desct->limit |= seg_desc->limit << 16;
3634 if (seg_desc->g) {
3635 kvm_desct->limit <<= 12;
3636 kvm_desct->limit |= 0xfff;
3637 }
3638 kvm_desct->selector = selector;
3639 kvm_desct->type = seg_desc->type;
3640 kvm_desct->present = seg_desc->p;
3641 kvm_desct->dpl = seg_desc->dpl;
3642 kvm_desct->db = seg_desc->d;
3643 kvm_desct->s = seg_desc->s;
3644 kvm_desct->l = seg_desc->l;
3645 kvm_desct->g = seg_desc->g;
3646 kvm_desct->avl = seg_desc->avl;
3647 if (!selector)
3648 kvm_desct->unusable = 1;
3649 else
3650 kvm_desct->unusable = 0;
3651 kvm_desct->padding = 0;
3652}
3653
3654static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
3655 u16 selector,
3656 struct descriptor_table *dtable)
3657{
3658 if (selector & 1 << 2) {
3659 struct kvm_segment kvm_seg;
3660
3e6e0aab 3661 kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
3662
3663 if (kvm_seg.unusable)
3664 dtable->limit = 0;
3665 else
3666 dtable->limit = kvm_seg.limit;
3667 dtable->base = kvm_seg.base;
3668 }
3669 else
3670 kvm_x86_ops->get_gdt(vcpu, dtable);
3671}
3672
 3673/* allowed just for 8-byte segments */
3674static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3675 struct desc_struct *seg_desc)
3676{
98899aa0 3677 gpa_t gpa;
3678 struct descriptor_table dtable;
3679 u16 index = selector >> 3;
3680
b8222ad2 3681 get_segment_descriptor_dtable(vcpu, selector, &dtable);
3682
3683 if (dtable.limit < index * 8 + 7) {
3684 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
3685 return 1;
3686 }
3687 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
3688 gpa += index * 8;
3689 return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
3690}
3691
 3692/* allowed just for 8-byte segments */
3693static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3694 struct desc_struct *seg_desc)
3695{
98899aa0 3696 gpa_t gpa;
3697 struct descriptor_table dtable;
3698 u16 index = selector >> 3;
3699
b8222ad2 3700 get_segment_descriptor_dtable(vcpu, selector, &dtable);
3701
3702 if (dtable.limit < index * 8 + 7)
3703 return 1;
3704 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
3705 gpa += index * 8;
3706 return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
3707}
3708
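/*
 * Reassemble the TSS base address scattered across the descriptor and
 * translate it to a guest physical address.
 */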
3709static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
3710 struct desc_struct *seg_desc)
3711{
3712 u32 base_addr;
3713
3714 base_addr = seg_desc->base0;
3715 base_addr |= (seg_desc->base1 << 16);
3716 base_addr |= (seg_desc->base2 << 24);
3717
98899aa0 3718 return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
3719}
3720
3721static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
3722{
3723 struct kvm_segment kvm_seg;
3724
3e6e0aab 3725 kvm_get_segment(vcpu, &kvm_seg, seg);
3726 return kvm_seg.selector;
3727}
3728
static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
                                                u16 selector,
                                                struct kvm_segment *kvm_seg)
{
        struct desc_struct seg_desc;

        if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
                return 1;
        seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
        return 0;
}

static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
{
        struct kvm_segment segvar = {
                .base = selector << 4,
                .limit = 0xffff,
                .selector = selector,
                .type = 3,
                .present = 1,
                .dpl = 3,
                .db = 0,
                .s = 1,
                .l = 0,
                .g = 0,
                .avl = 0,
                .unusable = 0,
        };
        kvm_x86_ops->set_segment(vcpu, &segvar, seg);
        return 0;
}

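/*
 * Illustrative note (not from the original file): in real mode the
 * segment base is simply selector << 4, so loading CS with 0xf000
 * yields base 0xf0000 and, with the 0xffff limit above, the classic
 * 64K real-mode segment.
 */
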
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                                int type_bits, int seg)
{
        struct kvm_segment kvm_seg;

        if (!(vcpu->arch.cr0 & X86_CR0_PE))
                return kvm_load_realmode_segment(vcpu, selector, seg);
        if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
                return 1;
        kvm_seg.type |= type_bits;

        if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
            seg != VCPU_SREG_LDTR)
                if (!kvm_seg.s)
                        kvm_seg.unusable = 1;

        kvm_set_segment(vcpu, &kvm_seg, seg);
        return 0;
}

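/*
 * Illustrative note (not from the original file): type_bits is OR-ed
 * into the segment type field; the TSS loaders below pass 1 (accessed)
 * for data segments, 9 (code | accessed) for CS and 0 for LDTR.
 */
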
static void save_state_to_tss32(struct kvm_vcpu *vcpu,
                                struct tss_segment_32 *tss)
{
        tss->cr3 = vcpu->arch.cr3;
        tss->eip = kvm_rip_read(vcpu);
        tss->eflags = kvm_x86_ops->get_rflags(vcpu);
        tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
        tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
        tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
        tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
        tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
        tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
        tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
        tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
        tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
        tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
        tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
        tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
        tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
        tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
        tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
}

static int load_state_from_tss32(struct kvm_vcpu *vcpu,
                                 struct tss_segment_32 *tss)
{
        kvm_set_cr3(vcpu, tss->cr3);

        kvm_rip_write(vcpu, tss->eip);
        kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);

        kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
        kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
        kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
        kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
        kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
        kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
        kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
        kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);

        if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
                return 1;

        if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
                return 1;

        if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
                return 1;

        if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
                return 1;

        if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
                return 1;

        if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
                return 1;

        if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
                return 1;
        return 0;
}

static void save_state_to_tss16(struct kvm_vcpu *vcpu,
                                struct tss_segment_16 *tss)
{
        tss->ip = kvm_rip_read(vcpu);
        tss->flag = kvm_x86_ops->get_rflags(vcpu);
        tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
        tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
        tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
        tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
        tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
        tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
        tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
        tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);

        tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
        tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
        tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
        tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
        tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
        tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
}

static int load_state_from_tss16(struct kvm_vcpu *vcpu,
                                 struct tss_segment_16 *tss)
{
        kvm_rip_write(vcpu, tss->ip);
        kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
        kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
        kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
        kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
        kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
        kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
        kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
        kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
        kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);

        if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
                return 1;

        if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
                return 1;

        if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
                return 1;

        if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
                return 1;

        if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
                return 1;
        return 0;
}

static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
                              u16 old_tss_sel, u32 old_tss_base,
                              struct desc_struct *nseg_desc)
{
        struct tss_segment_16 tss_segment_16;
        int ret = 0;

        if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
                           sizeof tss_segment_16))
                goto out;

        save_state_to_tss16(vcpu, &tss_segment_16);

        if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
                            sizeof tss_segment_16))
                goto out;

        if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
                           &tss_segment_16, sizeof tss_segment_16))
                goto out;

        if (old_tss_sel != 0xffff) {
                tss_segment_16.prev_task_link = old_tss_sel;

                if (kvm_write_guest(vcpu->kvm,
                                    get_tss_base_addr(vcpu, nseg_desc),
                                    &tss_segment_16.prev_task_link,
                                    sizeof tss_segment_16.prev_task_link))
                        goto out;
        }

        if (load_state_from_tss16(vcpu, &tss_segment_16))
                goto out;

        ret = 1;
out:
        return ret;
}

static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
                              u16 old_tss_sel, u32 old_tss_base,
                              struct desc_struct *nseg_desc)
{
        struct tss_segment_32 tss_segment_32;
        int ret = 0;

        if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
                           sizeof tss_segment_32))
                goto out;

        save_state_to_tss32(vcpu, &tss_segment_32);

        if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
                            sizeof tss_segment_32))
                goto out;

        if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
                           &tss_segment_32, sizeof tss_segment_32))
                goto out;

        if (old_tss_sel != 0xffff) {
                tss_segment_32.prev_task_link = old_tss_sel;

                if (kvm_write_guest(vcpu->kvm,
                                    get_tss_base_addr(vcpu, nseg_desc),
                                    &tss_segment_32.prev_task_link,
                                    sizeof tss_segment_32.prev_task_link))
                        goto out;
        }

        if (load_state_from_tss32(vcpu, &tss_segment_32))
                goto out;

        ret = 1;
out:
        return ret;
}

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
{
        struct kvm_segment tr_seg;
        struct desc_struct cseg_desc;
        struct desc_struct nseg_desc;
        int ret = 0;
        u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
        u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);

        old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);

        /* FIXME: Handle errors. A failure to read either TSS or either
         * descriptor should generate a page fault.
         */
        if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
                goto out;

        if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
                goto out;

        if (reason != TASK_SWITCH_IRET) {
                int cpl;

                cpl = kvm_x86_ops->get_cpl(vcpu);
                if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
                        kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
                        return 1;
                }
        }

        if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) {
                kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
                return 1;
        }

        if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
                cseg_desc.type &= ~(1 << 1); /* clear the B (busy) flag */
                save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
        }

        if (reason == TASK_SWITCH_IRET) {
                u32 eflags = kvm_x86_ops->get_rflags(vcpu);
                kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
        }

        /* Set the back link to the previous task only if the NT bit is
         * set in eflags; note that old_tss_sel is not used after this
         * point. */
        if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
                old_tss_sel = 0xffff;

        if (nseg_desc.type & 8)
                ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_sel,
                                         old_tss_base, &nseg_desc);
        else
                ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_sel,
                                         old_tss_base, &nseg_desc);

        if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
                u32 eflags = kvm_x86_ops->get_rflags(vcpu);
                kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
        }

        if (reason != TASK_SWITCH_IRET) {
                nseg_desc.type |= (1 << 1); /* set the B (busy) flag */
                save_guest_segment_descriptor(vcpu, tss_selector,
                                              &nseg_desc);
        }

        kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
        seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
        tr_seg.type = 11;
        kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
out:
        return ret;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);

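/*
 * Usage sketch (illustrative, based on how the vendor exit handlers use
 * this helper; not part of this file): on a task-switch intercept the
 * handler extracts the target TSS selector and the switch reason from
 * the exit information and returns
 * kvm_task_switch(vcpu, tss_selector, reason), where 1 lets the guest
 * continue running and 0 drops back to userspace.
 */
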
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        int mmu_reset_needed = 0;
        int i, pending_vec, max_bits;
        struct descriptor_table dt;

        vcpu_load(vcpu);

        dt.limit = sregs->idt.limit;
        dt.base = sregs->idt.base;
        kvm_x86_ops->set_idt(vcpu, &dt);
        dt.limit = sregs->gdt.limit;
        dt.base = sregs->gdt.base;
        kvm_x86_ops->set_gdt(vcpu, &dt);

        vcpu->arch.cr2 = sregs->cr2;
        mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;

        down_read(&vcpu->kvm->slots_lock);
        if (gfn_to_memslot(vcpu->kvm, sregs->cr3 >> PAGE_SHIFT))
                vcpu->arch.cr3 = sregs->cr3;
        else
                set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
        up_read(&vcpu->kvm->slots_lock);

        kvm_set_cr8(vcpu, sregs->cr8);

        mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
        kvm_x86_ops->set_efer(vcpu, sregs->efer);
        kvm_set_apic_base(vcpu, sregs->apic_base);

        kvm_x86_ops->decache_cr4_guest_bits(vcpu);

        mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
        kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
        vcpu->arch.cr0 = sregs->cr0;

        mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
        kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
        if (!is_long_mode(vcpu) && is_pae(vcpu))
                load_pdptrs(vcpu, vcpu->arch.cr3);

        if (mmu_reset_needed)
                kvm_mmu_reset_context(vcpu);

        if (!irqchip_in_kernel(vcpu->kvm)) {
                memcpy(vcpu->arch.irq_pending, sregs->interrupt_bitmap,
                       sizeof vcpu->arch.irq_pending);
                vcpu->arch.irq_summary = 0;
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.irq_pending); ++i)
                        if (vcpu->arch.irq_pending[i])
                                __set_bit(i, &vcpu->arch.irq_summary);
        } else {
                max_bits = (sizeof sregs->interrupt_bitmap) << 3;
                pending_vec = find_first_bit(
                        (const unsigned long *)sregs->interrupt_bitmap,
                        max_bits);
                /* Only a pending external irq is handled here */
                if (pending_vec < max_bits) {
                        kvm_queue_interrupt(vcpu, pending_vec);
                        pr_debug("Set back pending irq %d\n", pending_vec);
                }
                kvm_pic_clear_isr_ack(vcpu->kvm);
        }

        kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
        kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
        kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
        kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
        kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
        kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

        kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
        kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

        /* Older userspace won't unhalt the vcpu on reset. */
        if (vcpu->vcpu_id == 0 && kvm_rip_read(vcpu) == 0xfff0 &&
            sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
            !(vcpu->arch.cr0 & X86_CR0_PE))
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

        vcpu_put(vcpu);

        return 0;
}

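/*
 * Illustrative userspace sketch (assumes a vcpu fd from
 * KVM_CREATE_VCPU; not part of this file).  sregs are usually updated
 * read-modify-write so unrelated fields keep their current values:
 *
 *      struct kvm_sregs sregs;
 *
 *      ioctl(vcpu_fd, KVM_GET_SREGS, &sregs);
 *      sregs.cr0 |= X86_CR0_PE;
 *      ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
 */
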
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        int i, r;

        vcpu_load(vcpu);

        if ((dbg->control & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) ==
            (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) {
                for (i = 0; i < KVM_NR_DB_REGS; ++i)
                        vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
                vcpu->arch.switch_db_regs =
                        (dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
        } else {
                for (i = 0; i < KVM_NR_DB_REGS; i++)
                        vcpu->arch.eff_db[i] = vcpu->arch.db[i];
                vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
        }

        r = kvm_x86_ops->set_guest_debug(vcpu, dbg);

        if (dbg->control & KVM_GUESTDBG_INJECT_DB)
                kvm_queue_exception(vcpu, DB_VECTOR);
        else if (dbg->control & KVM_GUESTDBG_INJECT_BP)
                kvm_queue_exception(vcpu, BP_VECTOR);

        vcpu_put(vcpu);

        return r;
}

/*
 * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
 * we have asm/x86/processor.h.
 */
struct fxsave {
        u16     cwd;
        u16     swd;
        u16     twd;
        u16     fop;
        u64     rip;
        u64     rdp;
        u32     mxcsr;
        u32     mxcsr_mask;
        u32     st_space[32];   /* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
        u32     xmm_space[64];  /* 16*16 bytes for each XMM-reg = 256 bytes */
#else
        u32     xmm_space[32];  /* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};

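/*
 * Illustrative note (not from the original file): each FP register
 * occupies a 16-byte slot in st_space, so 8 registers take the 128
 * bytes that kvm_arch_vcpu_ioctl_get_fpu() below copies into fpu->fpr.
 */
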
/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        unsigned long vaddr = tr->linear_address;
        gpa_t gpa;

        vcpu_load(vcpu);
        down_read(&vcpu->kvm->slots_lock);
        gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
        up_read(&vcpu->kvm->slots_lock);
        tr->physical_address = gpa;
        tr->valid = gpa != UNMAPPED_GVA;
        tr->writeable = 1;
        tr->usermode = 0;
        vcpu_put(vcpu);

        return 0;
}

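/*
 * Illustrative userspace sketch (not part of this file): this handler
 * backs the KVM_TRANSLATE vcpu ioctl:
 *
 *      struct kvm_translation tr = { .linear_address = gva };
 *
 *      if (ioctl(vcpu_fd, KVM_TRANSLATE, &tr) == 0 && tr.valid)
 *              printf("gpa = 0x%llx\n", tr.physical_address);
 */
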
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

        vcpu_load(vcpu);

        memcpy(fpu->fpr, fxsave->st_space, 128);
        fpu->fcw = fxsave->cwd;
        fpu->fsw = fxsave->swd;
        fpu->ftwx = fxsave->twd;
        fpu->last_opcode = fxsave->fop;
        fpu->last_ip = fxsave->rip;
        fpu->last_dp = fxsave->rdp;
        memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

        vcpu_put(vcpu);

        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

        vcpu_load(vcpu);

        memcpy(fxsave->st_space, fpu->fpr, 128);
        fxsave->cwd = fpu->fcw;
        fxsave->swd = fpu->fsw;
        fxsave->twd = fpu->ftwx;
        fxsave->fop = fpu->last_opcode;
        fxsave->rip = fpu->last_ip;
        fxsave->rdp = fpu->last_dp;
        memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

        vcpu_put(vcpu);

        return 0;
}

void fx_init(struct kvm_vcpu *vcpu)
{
        unsigned after_mxcsr_mask;

        /*
         * Touch the fpu for the first time in non-atomic context: if
         * this is the first fpu instruction, the exception handler
         * will fire before the instruction returns, and it will have
         * to allocate ram with GFP_KERNEL.
         */
        if (!used_math())
                kvm_fx_save(&vcpu->arch.host_fx_image);

        /* Initialize guest FPU by resetting ours and saving into guest's */
        preempt_disable();
        kvm_fx_save(&vcpu->arch.host_fx_image);
        kvm_fx_finit();
        kvm_fx_save(&vcpu->arch.guest_fx_image);
        kvm_fx_restore(&vcpu->arch.host_fx_image);
        preempt_enable();

        vcpu->arch.cr0 |= X86_CR0_ET;
        after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
        vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
        memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
               0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);

void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
        if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
                return;

        vcpu->guest_fpu_loaded = 1;
        kvm_fx_save(&vcpu->arch.host_fx_image);
        kvm_fx_restore(&vcpu->arch.guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
        if (!vcpu->guest_fpu_loaded)
                return;

        vcpu->guest_fpu_loaded = 0;
        kvm_fx_save(&vcpu->arch.guest_fx_image);
        kvm_fx_restore(&vcpu->arch.host_fx_image);
        ++vcpu->stat.fpu_reload;
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);

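/*
 * Illustrative note (not from the original file): the two helpers above
 * implement lazy FPU switching.  guest_fpu_loaded tracks whose state is
 * currently in the hardware registers, so a vcpu that never touches the
 * FPU pays for neither save nor restore; stat.fpu_reload counts the
 * switches.
 */
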
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.time_page) {
                kvm_release_page_dirty(vcpu->arch.time_page);
                vcpu->arch.time_page = NULL;
        }

        kvm_x86_ops->vcpu_free(vcpu);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        return kvm_x86_ops->vcpu_create(kvm, id);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        int r;

        /* We do fxsave: this must be aligned. */
        BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);

        vcpu->arch.mtrr_state.have_fixed = 1;
        vcpu_load(vcpu);
        r = kvm_arch_vcpu_reset(vcpu);
        if (r == 0)
                r = kvm_mmu_setup(vcpu);
        vcpu_put(vcpu);
        if (r < 0)
                goto free_vcpu;

        return 0;
free_vcpu:
        kvm_x86_ops->vcpu_free(vcpu);
        return r;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        kvm_mmu_unload(vcpu);
        vcpu_put(vcpu);

        kvm_x86_ops->vcpu_free(vcpu);
}

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
{
        vcpu->arch.nmi_pending = false;
        vcpu->arch.nmi_injected = false;

        vcpu->arch.switch_db_regs = 0;
        memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
        vcpu->arch.dr6 = DR6_FIXED_1;
        vcpu->arch.dr7 = DR7_FIXED_1;

        return kvm_x86_ops->vcpu_reset(vcpu);
}

void kvm_arch_hardware_enable(void *garbage)
{
        kvm_x86_ops->hardware_enable(garbage);
}

void kvm_arch_hardware_disable(void *garbage)
{
        kvm_x86_ops->hardware_disable(garbage);
}

int kvm_arch_hardware_setup(void)
{
        return kvm_x86_ops->hardware_setup();
}

void kvm_arch_hardware_unsetup(void)
{
        kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
        kvm_x86_ops->check_processor_compatibility(rtn);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct page *page;
        struct kvm *kvm;
        int r;

        BUG_ON(vcpu->kvm == NULL);
        kvm = vcpu->kvm;

        vcpu->arch.mmu.root_hpa = INVALID_PAGE;
        if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
        else
                vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto fail;
        }
        vcpu->arch.pio_data = page_address(page);

        r = kvm_mmu_create(vcpu);
        if (r < 0)
                goto fail_free_pio_data;

        if (irqchip_in_kernel(kvm)) {
                r = kvm_create_lapic(vcpu);
                if (r < 0)
                        goto fail_mmu_destroy;
        }

        return 0;

fail_mmu_destroy:
        kvm_mmu_destroy(vcpu);
fail_free_pio_data:
        free_page((unsigned long)vcpu->arch.pio_data);
fail:
        return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_free_lapic(vcpu);
        down_read(&vcpu->kvm->slots_lock);
        kvm_mmu_destroy(vcpu);
        up_read(&vcpu->kvm->slots_lock);
        free_page((unsigned long)vcpu->arch.pio_data);
}

struct kvm *kvm_arch_create_vm(void)
{
        struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

        if (!kvm)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
        INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

        /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
        set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);

        rdtscll(kvm->arch.vm_init_tsc);

        return kvm;
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        kvm_mmu_unload(vcpu);
        vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;

        /*
         * Unpin any mmu pages first.
         */
        for (i = 0; i < KVM_MAX_VCPUS; ++i)
                if (kvm->vcpus[i])
                        kvm_unload_vcpu_mmu(kvm->vcpus[i]);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
                        kvm_arch_vcpu_free(kvm->vcpus[i]);
                        kvm->vcpus[i] = NULL;
                }
        }
}

void kvm_arch_sync_events(struct kvm *kvm)
{
        kvm_free_all_assigned_devices(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_iommu_unmap_guest(kvm);
        kvm_free_pit(kvm);
        kfree(kvm->arch.vpic);
        kfree(kvm->arch.vioapic);
        kvm_free_vcpus(kvm);
        kvm_free_physmem(kvm);
        if (kvm->arch.apic_access_page)
                put_page(kvm->arch.apic_access_page);
        if (kvm->arch.ept_identity_pagetable)
                put_page(kvm->arch.ept_identity_pagetable);
        kfree(kvm);
}

int kvm_arch_set_memory_region(struct kvm *kvm,
                               struct kvm_userspace_memory_region *mem,
                               struct kvm_memory_slot old,
                               int user_alloc)
{
        int npages = mem->memory_size >> PAGE_SHIFT;
        struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];

        /* To keep backward compatibility with older userspace,
         * x86 needs to handle the !user_alloc case.
         */
        if (!user_alloc) {
                if (npages && !old.rmap) {
                        unsigned long userspace_addr;

                        down_write(&current->mm->mmap_sem);
                        userspace_addr = do_mmap(NULL, 0,
                                                 npages * PAGE_SIZE,
                                                 PROT_READ | PROT_WRITE,
                                                 MAP_PRIVATE | MAP_ANONYMOUS,
                                                 0);
                        up_write(&current->mm->mmap_sem);

                        if (IS_ERR((void *)userspace_addr))
                                return PTR_ERR((void *)userspace_addr);

                        /* set userspace_addr atomically for kvm_hva_to_rmapp */
                        spin_lock(&kvm->mmu_lock);
                        memslot->userspace_addr = userspace_addr;
                        spin_unlock(&kvm->mmu_lock);
                } else {
                        if (!old.user_alloc && old.rmap) {
                                int ret;

                                down_write(&current->mm->mmap_sem);
                                ret = do_munmap(current->mm, old.userspace_addr,
                                                old.npages * PAGE_SIZE);
                                up_write(&current->mm->mmap_sem);
                                if (ret < 0)
                                        printk(KERN_WARNING
                                               "kvm_vm_ioctl_set_memory_region: "
                                               "failed to munmap memory\n");
                        }
                }
        }

        spin_lock(&kvm->mmu_lock);
        if (!kvm->arch.n_requested_mmu_pages) {
                unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
                kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
        }

        kvm_mmu_slot_remove_write_access(kvm, mem->slot);
        spin_unlock(&kvm->mmu_lock);
        kvm_flush_remote_tlbs(kvm);

        return 0;
}

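/*
 * Illustrative userspace sketch (not part of this file): the user_alloc
 * path above corresponds to a slot registered with
 * KVM_SET_USER_MEMORY_REGION:
 *
 *      void *ram = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *      struct kvm_userspace_memory_region mem = {
 *              .slot            = 0,
 *              .guest_phys_addr = 0,
 *              .memory_size     = size,
 *              .userspace_addr  = (unsigned long)ram,
 *      };
 *
 *      ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */
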
void kvm_arch_flush_shadow(struct kvm *kvm)
{
        kvm_mmu_zap_all(kvm);
        kvm_reload_remote_mmus(kvm);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
                || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
                || vcpu->arch.nmi_pending;
}

void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
        int me;
        int cpu = vcpu->cpu;

        if (waitqueue_active(&vcpu->wq)) {
                wake_up_interruptible(&vcpu->wq);
                ++vcpu->stat.halt_wakeup;
        }

        me = get_cpu();
        if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
                if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
                        smp_send_reschedule(cpu);
        put_cpu();
}

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
        return kvm_x86_ops->interrupt_allowed(vcpu);
}