b024b793
XZ
1/*
2 * kvm_ia64.c: Basic KVM support on Itanium series processors
3 *
4 *
5 * Copyright (C) 2007, Intel Corporation.
6 * Xiantao Zhang (xiantao.zhang@intel.com)
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
19 * Place - Suite 330, Boston, MA 02111-1307 USA.
20 *
21 */
22
23#include <linux/module.h>
24#include <linux/errno.h>
25#include <linux/percpu.h>
26#include <linux/gfp.h>
27#include <linux/fs.h>
28#include <linux/smp.h>
29#include <linux/kvm_host.h>
30#include <linux/kvm.h>
31#include <linux/bitops.h>
32#include <linux/hrtimer.h>
33#include <linux/uaccess.h>
19de40a8 34#include <linux/iommu.h>
2381ad24 35#include <linux/intel-iommu.h>
b024b793
XZ
36
37#include <asm/pgtable.h>
38#include <asm/gcc_intrin.h>
39#include <asm/pal.h>
40#include <asm/cacheflush.h>
41#include <asm/div64.h>
42#include <asm/tlb.h>
9f726323 43#include <asm/elf.h>
0c72ea7f
JS
44#include <asm/sn/addrs.h>
45#include <asm/sn/clksupport.h>
46#include <asm/sn/shub_mmr.h>
b024b793
XZ
47
48#include "misc.h"
49#include "vti.h"
50#include "iodev.h"
51#include "ioapic.h"
52#include "lapic.h"
2f749771 53#include "irq.h"
b024b793
XZ
54
55static unsigned long kvm_vmm_base;
56static unsigned long kvm_vsa_base;
57static unsigned long kvm_vm_buffer;
58static unsigned long kvm_vm_buffer_size;
59unsigned long kvm_vmm_gp;
60
61static long vp_env_info;
62
63static struct kvm_vmm_info *kvm_vmm_info;
64
65static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);
66
67struct kvm_stats_debugfs_item debugfs_entries[] = {
68 { NULL }
69};
70
b024b793
XZ
71static void kvm_flush_icache(unsigned long start, unsigned long len)
72{
73 int l;
74
75 for (l = 0; l < (len + 32); l += 32)
7120569c 76 ia64_fc((void *)(start + l));
b024b793
XZ
77
78 ia64_sync_i();
79 ia64_srlz_i();
80}
81
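/*
 * Purge the local TLB using the PTCE parameters reported by PAL:
 * count0 x count1 ptc.e operations, stepping by stride0/stride1,
 * cover the whole translation cache on this cpu.
 */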
82static void kvm_flush_tlb_all(void)
83{
84 unsigned long i, j, count0, count1, stride0, stride1, addr;
85 long flags;
86
87 addr = local_cpu_data->ptce_base;
88 count0 = local_cpu_data->ptce_count[0];
89 count1 = local_cpu_data->ptce_count[1];
90 stride0 = local_cpu_data->ptce_stride[0];
91 stride1 = local_cpu_data->ptce_stride[1];
92
93 local_irq_save(flags);
94 for (i = 0; i < count0; ++i) {
95 for (j = 0; j < count1; ++j) {
96 ia64_ptce(addr);
97 addr += stride1;
98 }
99 addr += stride0;
100 }
101 local_irq_restore(flags);
102 ia64_srlz_i(); /* srlz.i implies srlz.d */
103}
104
105long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
106{
107 struct ia64_pal_retval iprv;
108
109 PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva,
110 (u64)opt_handler);
111
112 return iprv.status;
113}
114
115static DEFINE_SPINLOCK(vp_lock);
116
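/*
 * Pin the VMM area with a temporary translation register entry and
 * initialize the per-cpu VT-i environment via PAL_VP_INIT_ENV.  The
 * first cpu to succeed records the shared VSA base for later use.
 */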
117void kvm_arch_hardware_enable(void *garbage)
118{
119 long status;
120 long tmp_base;
121 unsigned long pte;
122 unsigned long saved_psr;
123 int slot;
124
0c72ea7f 125 pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
b024b793
XZ
126 local_irq_save(saved_psr);
127 slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
cab7a1ee 128 local_irq_restore(saved_psr);
b024b793
XZ
129 if (slot < 0)
130 return;
b024b793
XZ
131
132 spin_lock(&vp_lock);
133 status = ia64_pal_vp_init_env(kvm_vsa_base ?
134 VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
135 __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
136 if (status != 0) {
137 printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
138 return ;
139 }
140
141 if (!kvm_vsa_base) {
142 kvm_vsa_base = tmp_base;
143 printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base);
144 }
145 spin_unlock(&vp_lock);
146 ia64_ptr_entry(0x3, slot);
147}
148
149void kvm_arch_hardware_disable(void *garbage)
150{
151
152 long status;
153 int slot;
154 unsigned long pte;
155 unsigned long saved_psr;
156 unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);
157
158 pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
159 PAGE_KERNEL));
160
161 local_irq_save(saved_psr);
162 slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
cab7a1ee 163 local_irq_restore(saved_psr);
b024b793
XZ
164 if (slot < 0)
165 return;
b024b793
XZ
166
167 status = ia64_pal_vp_exit_env(host_iva);
168 if (status)
169 printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n",
170 status);
171 ia64_ptr_entry(0x3, slot);
172}
173
174void kvm_arch_check_processor_compat(void *rtn)
175{
176 *(int *)rtn = 0;
177}
178
179int kvm_dev_ioctl_check_extension(long ext)
180{
181
182 int r;
183
184 switch (ext) {
185 case KVM_CAP_IRQCHIP:
8c4b537d 186 case KVM_CAP_MP_STATE:
4925663a 187 case KVM_CAP_IRQ_INJECT_STATUS:
b024b793
XZ
188 r = 1;
189 break;
7f39f8ac
LV
190 case KVM_CAP_COALESCED_MMIO:
191 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
192 break;
2381ad24 193 case KVM_CAP_IOMMU:
19de40a8 194 r = iommu_found();
2381ad24 195 break;
b024b793
XZ
196 default:
197 r = 0;
198 }
199 return r;
200
201}
202
203static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
92760499 204 gpa_t addr, int len, int is_write)
b024b793
XZ
205{
206 struct kvm_io_device *dev;
207
92760499 208 dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len, is_write);
b024b793
XZ
209
210 return dev;
211}
212
213static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
214{
215 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
216 kvm_run->hw.hardware_exit_reason = 1;
217 return 0;
218}
219
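/*
 * MMIO exits: accesses that hit the in-kernel IOAPIC page are handled
 * directly through the mmio bus; everything else is forwarded to
 * userspace via kvm_run->mmio for emulation.
 */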
220static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
221{
222 struct kvm_mmio_req *p;
223 struct kvm_io_device *mmio_dev;
224
225 p = kvm_get_vcpu_ioreq(vcpu);
226
227 if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
228 goto mmio;
229 vcpu->mmio_needed = 1;
230 vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
231 vcpu->mmio_size = kvm_run->mmio.len = p->size;
232 vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;
233
234 if (vcpu->mmio_is_write)
235 memcpy(vcpu->mmio_data, &p->data, p->size);
236 memcpy(kvm_run->mmio.data, &p->data, p->size);
237 kvm_run->exit_reason = KVM_EXIT_MMIO;
238 return 0;
239mmio:
92760499 240 mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr, p->size, !p->dir);
b024b793
XZ
241 if (mmio_dev) {
242 if (!p->dir)
243 kvm_iodevice_write(mmio_dev, p->addr, p->size,
244 &p->data);
245 else
246 kvm_iodevice_read(mmio_dev, p->addr, p->size,
247 &p->data);
248
249 } else
250 printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
251 p->state = STATE_IORESP_READY;
252
253 return 1;
254}
255
256static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
257{
258 struct exit_ctl_data *p;
259
260 p = kvm_get_exit_data(vcpu);
261
262 if (p->exit_reason == EXIT_REASON_PAL_CALL)
263 return kvm_pal_emul(vcpu, kvm_run);
264 else {
265 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
266 kvm_run->hw.hardware_exit_reason = 2;
267 return 0;
268 }
269}
270
271static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
272{
273 struct exit_ctl_data *p;
274
275 p = kvm_get_exit_data(vcpu);
276
277 if (p->exit_reason == EXIT_REASON_SAL_CALL) {
278 kvm_sal_emul(vcpu);
279 return 1;
280 } else {
281 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
282 kvm_run->hw.hardware_exit_reason = 3;
283 return 0;
284 }
285
286}
287
58c2dde1
GN
288static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector)
289{
290 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
291
292 if (!test_and_set_bit(vector, &vpd->irr[0])) {
293 vcpu->arch.irq_new_pending = 1;
294 kvm_vcpu_kick(vcpu);
295 return 1;
296 }
297 return 0;
298}
299
b024b793
XZ
300/*
301 * dm: SAPIC delivery mode of the IPI (fixed, NMI, ExtINT, ...).
302 * vector: interrupt vector to deliver.
303 */
304static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
305 uint64_t vector)
306{
307 switch (dm) {
308 case SAPIC_FIXED:
b024b793
XZ
309 break;
310 case SAPIC_NMI:
58c2dde1 311 vector = 2;
b024b793
XZ
312 break;
313 case SAPIC_EXTINT:
58c2dde1 314 vector = 0;
b024b793
XZ
315 break;
316 case SAPIC_INIT:
317 case SAPIC_PMI:
318 default:
319 printk(KERN_ERR"kvm: Unimplemented/reserved IPI delivery mode!\n");
58c2dde1 320 return;
b024b793 321 }
58c2dde1 322 __apic_accept_irq(vcpu, vector);
b024b793
XZ
323}
324
325static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
326 unsigned long eid)
327{
328 union ia64_lid lid;
329 int i;
330
934d534f 331 for (i = 0; i < kvm->arch.online_vcpus; i++) {
b024b793
XZ
332 if (kvm->vcpus[i]) {
333 lid.val = VCPU_LID(kvm->vcpus[i]);
334 if (lid.id == id && lid.eid == eid)
335 return kvm->vcpus[i];
336 }
337 }
338
339 return NULL;
340}
341
342static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
343{
344 struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
345 struct kvm_vcpu *target_vcpu;
346 struct kvm_pt_regs *regs;
347 union ia64_ipi_a addr = p->u.ipi_data.addr;
348 union ia64_ipi_d data = p->u.ipi_data.data;
349
350 target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
351 if (!target_vcpu)
352 return handle_vm_error(vcpu, kvm_run);
353
354 if (!target_vcpu->arch.launched) {
355 regs = vcpu_regs(target_vcpu);
356
357 regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
358 regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;
359
a4535290 360 target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
b024b793
XZ
361 if (waitqueue_active(&target_vcpu->wq))
362 wake_up_interruptible(&target_vcpu->wq);
363 } else {
364 vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
365 if (target_vcpu != vcpu)
366 kvm_vcpu_kick(target_vcpu);
367 }
368
369 return 1;
370}
371
372struct call_data {
373 struct kvm_ptc_g ptc_g_data;
374 struct kvm_vcpu *vcpu;
375};
376
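/*
 * Queue a guest ptc.g request on the target vcpu.  If the per-vcpu
 * buffer overflows, fall back to requesting a full TLB flush instead.
 */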
377static void vcpu_global_purge(void *info)
378{
379 struct call_data *p = (struct call_data *)info;
380 struct kvm_vcpu *vcpu = p->vcpu;
381
382 if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
383 return;
384
385 set_bit(KVM_REQ_PTC_G, &vcpu->requests);
386 if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) {
387 vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] =
388 p->ptc_g_data;
389 } else {
390 clear_bit(KVM_REQ_PTC_G, &vcpu->requests);
391 vcpu->arch.ptc_g_count = 0;
392 set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
393 }
394}
395
396static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
397{
398 struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
399 struct kvm *kvm = vcpu->kvm;
400 struct call_data call_data;
401 int i;
decc9016 402
b024b793
XZ
403 call_data.ptc_g_data = p->u.ptc_g_data;
404
934d534f 405 for (i = 0; i < kvm->arch.online_vcpus; i++) {
b024b793 406 if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
a4535290 407 KVM_MP_STATE_UNINITIALIZED ||
b024b793
XZ
408 vcpu == kvm->vcpus[i])
409 continue;
410
411 if (waitqueue_active(&kvm->vcpus[i]->wq))
412 wake_up_interruptible(&kvm->vcpus[i]->wq);
413
414 if (kvm->vcpus[i]->cpu != -1) {
415 call_data.vcpu = kvm->vcpus[i];
416 smp_call_function_single(kvm->vcpus[i]->cpu,
2f73ccab 417 vcpu_global_purge, &call_data, 1);
b024b793
XZ
418 } else
419 printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
420
421 }
422 return 1;
423}
424
425static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
426{
427 return 1;
428}
429
0c72ea7f
JS
430static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu)
431{
432 unsigned long pte, rtc_phys_addr, map_addr;
433 int slot;
434
435 map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT);
436 rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC;
437 pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC));
438 slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT);
439 vcpu->arch.sn_rtc_tr_slot = slot;
440 if (slot < 0) {
441 printk(KERN_ERR "Mayday mayday! RTC mapping failed!\n");
442 slot = 0;
443 }
444 return slot;
445}
446
b024b793
XZ
447int kvm_emulate_halt(struct kvm_vcpu *vcpu)
448{
449
450 ktime_t kt;
451 long itc_diff;
452 unsigned long vcpu_now_itc;
b024b793
XZ
453 unsigned long expires;
454 struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
455 unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
456 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
457
decc9016 458 if (irqchip_in_kernel(vcpu->kvm)) {
b024b793 459
decc9016 460 vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
b024b793 461
decc9016
XZ
462 if (time_after(vcpu_now_itc, vpd->itm)) {
463 vcpu->arch.timer_check = 1;
464 return 1;
465 }
466 itc_diff = vpd->itm - vcpu_now_itc;
467 if (itc_diff < 0)
468 itc_diff = -itc_diff;
469
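/*
 * Convert the remaining ITC cycles to a timeout: itc_diff / cyc_per_usec
 * gives microseconds, which are scaled by 1000 into nanoseconds for the
 * hrtimer below.
 */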
470 expires = div64_u64(itc_diff, cyc_per_usec);
471 kt = ktime_set(0, 1000 * expires);
472
decc9016
XZ
473 vcpu->arch.ht_active = 1;
474 hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
b024b793 475
a4535290 476 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
b024b793
XZ
477 kvm_vcpu_block(vcpu);
478 hrtimer_cancel(p_ht);
479 vcpu->arch.ht_active = 0;
480
decc9016
XZ
481 if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
482 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
483 vcpu->arch.mp_state =
484 KVM_MP_STATE_RUNNABLE;
decc9016 485
a4535290 486 if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
b024b793
XZ
487 return -EINTR;
488 return 1;
489 } else {
490 printk(KERN_ERR"kvm: Unsupported userspace halt!");
491 return 0;
492 }
493}
494
495static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
496 struct kvm_run *kvm_run)
497{
498 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
499 return 0;
500}
501
502static int handle_external_interrupt(struct kvm_vcpu *vcpu,
503 struct kvm_run *kvm_run)
504{
505 return 1;
506}
507
7d637978
XZ
508static int handle_vcpu_debug(struct kvm_vcpu *vcpu,
509 struct kvm_run *kvm_run)
510{
511 printk("VMM: %s", vcpu->arch.log_buf);
512 return 1;
513}
514
b024b793
XZ
515static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
516 struct kvm_run *kvm_run) = {
517 [EXIT_REASON_VM_PANIC] = handle_vm_error,
518 [EXIT_REASON_MMIO_INSTRUCTION] = handle_mmio,
519 [EXIT_REASON_PAL_CALL] = handle_pal_call,
520 [EXIT_REASON_SAL_CALL] = handle_sal_call,
521 [EXIT_REASON_SWITCH_RR6] = handle_switch_rr6,
522 [EXIT_REASON_VM_DESTROY] = handle_vm_shutdown,
523 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
524 [EXIT_REASON_IPI] = handle_ipi,
525 [EXIT_REASON_PTC_G] = handle_global_purge,
7d637978 526 [EXIT_REASON_DEBUG] = handle_vcpu_debug,
b024b793
XZ
527
528};
529
530static const int kvm_vti_max_exit_handlers =
531 sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
532
b024b793
XZ
533static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
534{
535 struct exit_ctl_data *p_exit_data;
536
537 p_exit_data = kvm_get_exit_data(vcpu);
538 return p_exit_data->exit_reason;
539}
540
541/*
542 * The guest has exited. See if we can fix it or if we need userspace
543 * assistance.
544 */
545static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
546{
547 u32 exit_reason = kvm_get_exit_reason(vcpu);
548 vcpu->arch.last_exit = exit_reason;
549
550 if (exit_reason < kvm_vti_max_exit_handlers
551 && kvm_vti_exit_handlers[exit_reason])
552 return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);
553 else {
554 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
555 kvm_run->hw.hardware_exit_reason = exit_reason;
556 }
557 return 0;
558}
559
560static inline void vti_set_rr6(unsigned long rr6)
561{
562 ia64_set_rr(RR6, rr6);
563 ia64_srlz_i();
564}
565
566static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
567{
568 unsigned long pte;
569 struct kvm *kvm = vcpu->kvm;
570 int r;
571
572 /*Insert a pair of tr to map vmm*/
573 pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
574 r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
575 if (r < 0)
576 goto out;
577 vcpu->arch.vmm_tr_slot = r;
578 /*Insert a pair of tr to map data of vm*/
579 pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
580 r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
581 pte, KVM_VM_DATA_SHIFT);
582 if (r < 0)
583 goto out;
584 vcpu->arch.vm_tr_slot = r;
0c72ea7f
JS
585
586#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
587 if (kvm->arch.is_sn2) {
588 r = kvm_sn2_setup_mappings(vcpu);
589 if (r < 0)
590 goto out;
591 }
592#endif
593
b024b793
XZ
594 r = 0;
595out:
596 return r;
b024b793
XZ
597}
598
599static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
600{
0c72ea7f 601 struct kvm *kvm = vcpu->kvm;
b024b793
XZ
602 ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
603 ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
0c72ea7f
JS
604#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
605 if (kvm->arch.is_sn2)
606 ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot);
607#endif
b024b793
XZ
608}
609
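/*
 * Before switching to the VMM: flush the TLB if this vcpu last ran on a
 * different physical cpu (or another vcpu ran here since), point rr6 at
 * the VMM's region id and insert the VMM/VM-data translations.
 */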
610static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
611{
612 int cpu = smp_processor_id();
613
614 if (vcpu->arch.last_run_cpu != cpu ||
615 per_cpu(last_vcpu, cpu) != vcpu) {
616 per_cpu(last_vcpu, cpu) = vcpu;
617 vcpu->arch.last_run_cpu = cpu;
618 kvm_flush_tlb_all();
619 }
620
621 vcpu->arch.host_rr6 = ia64_get_rr(RR6);
622 vti_set_rr6(vcpu->arch.vmm_rr);
623 return kvm_insert_vmm_mapping(vcpu);
624}
625static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
626{
627 kvm_purge_vmm_mapping(vcpu);
628 vti_set_rr6(vcpu->arch.host_rr6);
629}
630
631static int vti_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
632{
633 union context *host_ctx, *guest_ctx;
634 int r;
635
636 /*Get host and guest context with guest address space.*/
637 host_ctx = kvm_get_host_context(vcpu);
638 guest_ctx = kvm_get_guest_context(vcpu);
639
640 r = kvm_vcpu_pre_transition(vcpu);
641 if (r < 0)
642 goto out;
643 kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
644 kvm_vcpu_post_transition(vcpu);
645 r = 0;
646out:
647 return r;
648}
649
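/*
 * Main vcpu run loop: bail out on pending signals, enter the guest with
 * interrupts disabled, then dispatch the exit reason; keep looping until
 * userspace intervention is needed or a reschedule is due.
 */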
650static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
651{
652 int r;
653
654again:
b024b793 655 if (signal_pending(current)) {
b024b793
XZ
656 r = -EINTR;
657 kvm_run->exit_reason = KVM_EXIT_INTR;
658 goto out;
659 }
660
d24d2c1c
JS
661 /*
662 * down_read() may sleep and return with interrupts enabled
663 */
664 down_read(&vcpu->kvm->slots_lock);
665
666 preempt_disable();
667 local_irq_disable();
668
b024b793
XZ
669 vcpu->guest_mode = 1;
670 kvm_guest_enter();
b024b793
XZ
671 r = vti_vcpu_run(vcpu, kvm_run);
672 if (r < 0) {
673 local_irq_enable();
674 preempt_enable();
675 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
676 goto out;
677 }
678
679 vcpu->arch.launched = 1;
680 vcpu->guest_mode = 0;
681 local_irq_enable();
682
683 /*
684 * We must have an instruction between local_irq_enable() and
685 * kvm_guest_exit(), so the timer interrupt isn't delayed by
686 * the interrupt shadow. The stat.exits increment will do nicely.
687 * But we need to prevent reordering, hence this barrier():
688 */
689 barrier();
b024b793 690 kvm_guest_exit();
decc9016 691 up_read(&vcpu->kvm->slots_lock);
b024b793
XZ
692 preempt_enable();
693
694 r = kvm_handle_exit(kvm_run, vcpu);
695
696 if (r > 0) {
697 if (!need_resched())
698 goto again;
699 }
700
701out:
702 if (r > 0) {
703 kvm_resched(vcpu);
704 goto again;
705 }
706
707 return r;
708}
709
710static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
711{
712 struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);
713
714 if (!vcpu->mmio_is_write)
715 memcpy(&p->data, vcpu->mmio_data, 8);
716 p->state = STATE_IORESP_READY;
717}
718
719int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
720{
721 int r;
722 sigset_t sigsaved;
723
724 vcpu_load(vcpu);
725
a2e4e289
XZ
726 if (vcpu->sigset_active)
727 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
728
a4535290 729 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
b024b793 730 kvm_vcpu_block(vcpu);
decc9016 731 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
a2e4e289
XZ
732 r = -EAGAIN;
733 goto out;
b024b793
XZ
734 }
735
b024b793
XZ
736 if (vcpu->mmio_needed) {
737 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
738 kvm_set_mmio_data(vcpu);
739 vcpu->mmio_read_completed = 1;
740 vcpu->mmio_needed = 0;
741 }
742 r = __vcpu_run(vcpu, kvm_run);
a2e4e289 743out:
b024b793
XZ
744 if (vcpu->sigset_active)
745 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
746
747 vcpu_put(vcpu);
748 return r;
749}
750
b024b793
XZ
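/*
 * All VM state (struct kvm, per-vcpu data, the dirty log) is carved out
 * of one physically contiguous KVM_VM_DATA_SIZE allocation; struct kvm
 * itself sits at a fixed offset inside that area.
 */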
751static struct kvm *kvm_alloc_kvm(void)
752{
753
754 struct kvm *kvm;
755 uint64_t vm_base;
756
a917f7af
XZ
757 BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE);
758
b024b793
XZ
759 vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
760
761 if (!vm_base)
762 return ERR_PTR(-ENOMEM);
b024b793 763
b024b793 764 memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
a917f7af
XZ
765 kvm = (struct kvm *)(vm_base +
766 offsetof(struct kvm_vm_data, kvm_vm_struct));
b024b793 767 kvm->arch.vm_base = vm_base;
a917f7af 768 printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base);
b024b793
XZ
769
770 return kvm;
771}
772
773struct kvm_io_range {
774 unsigned long start;
775 unsigned long size;
776 unsigned long type;
777};
778
779static const struct kvm_io_range io_ranges[] = {
780 {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
781 {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
782 {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
783 {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
784 {PIB_START, PIB_SIZE, GPFN_PIB},
785};
786
787static void kvm_build_io_pmt(struct kvm *kvm)
788{
789 unsigned long i, j;
790
791 /* Mark I/O ranges */
792 for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range));
793 i++) {
794 for (j = io_ranges[i].start;
795 j < io_ranges[i].start + io_ranges[i].size;
796 j += PAGE_SIZE)
797 kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT,
798 io_ranges[i].type, 0);
799 }
800
801}
802
803/*Use unused rids to virtualize guest rid.*/
804#define GUEST_PHYSICAL_RR0 0x1739
805#define GUEST_PHYSICAL_RR4 0x2739
806#define VMM_INIT_RR 0x1660
807
808static void kvm_init_vm(struct kvm *kvm)
809{
b024b793
XZ
810 BUG_ON(!kvm);
811
812 kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
813 kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
814 kvm->arch.vmm_init_rr = VMM_INIT_RR;
815
b024b793
XZ
816 /*
817 *Fill P2M entries for MMIO/IO ranges
818 */
819 kvm_build_io_pmt(kvm);
820
2381ad24 821 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
5550af4d
SY
822
823 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
824 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
b024b793
XZ
825}
826
827struct kvm *kvm_arch_create_vm(void)
828{
829 struct kvm *kvm = kvm_alloc_kvm();
830
831 if (IS_ERR(kvm))
832 return ERR_PTR(-ENOMEM);
0c72ea7f
JS
833
834 kvm->arch.is_sn2 = ia64_platform_is("sn2");
835
b024b793
XZ
836 kvm_init_vm(kvm);
837
934d534f
JS
838 kvm->arch.online_vcpus = 0;
839
b024b793
XZ
840 return kvm;
841
842}
843
844static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
845 struct kvm_irqchip *chip)
846{
847 int r;
848
849 r = 0;
850 switch (chip->chip_id) {
851 case KVM_IRQCHIP_IOAPIC:
852 memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm),
853 sizeof(struct kvm_ioapic_state));
854 break;
855 default:
856 r = -EINVAL;
857 break;
858 }
859 return r;
860}
861
862static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
863{
864 int r;
865
866 r = 0;
867 switch (chip->chip_id) {
868 case KVM_IRQCHIP_IOAPIC:
869 memcpy(ioapic_irqchip(kvm),
870 &chip->chip.ioapic,
871 sizeof(struct kvm_ioapic_state));
872 break;
873 default:
874 r = -EINVAL;
875 break;
876 }
877 return r;
878}
879
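/*
 * Copy one field from the userspace register image into the vcpu, e.g.
 * RESTORE_REGS(mp_state) expands to: vcpu->arch.mp_state = regs->mp_state
 */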
880#define RESTORE_REGS(_x) vcpu->arch._x = regs->_x
881
882int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
883{
b024b793 884 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
042b26ed 885 int i;
b024b793
XZ
886
887 vcpu_load(vcpu);
888
889 for (i = 0; i < 16; i++) {
890 vpd->vgr[i] = regs->vpd.vgr[i];
891 vpd->vbgr[i] = regs->vpd.vbgr[i];
892 }
893 for (i = 0; i < 128; i++)
894 vpd->vcr[i] = regs->vpd.vcr[i];
895 vpd->vhpi = regs->vpd.vhpi;
896 vpd->vnat = regs->vpd.vnat;
897 vpd->vbnat = regs->vpd.vbnat;
898 vpd->vpsr = regs->vpd.vpsr;
899
900 vpd->vpr = regs->vpd.vpr;
901
042b26ed 902 memcpy(&vcpu->arch.guest, &regs->saved_guest, sizeof(union context));
b024b793
XZ
903
904 RESTORE_REGS(mp_state);
905 RESTORE_REGS(vmm_rr);
906 memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS);
907 memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS);
908 RESTORE_REGS(itr_regions);
909 RESTORE_REGS(dtr_regions);
910 RESTORE_REGS(tc_regions);
911 RESTORE_REGS(irq_check);
912 RESTORE_REGS(itc_check);
913 RESTORE_REGS(timer_check);
914 RESTORE_REGS(timer_pending);
915 RESTORE_REGS(last_itc);
916 for (i = 0; i < 8; i++) {
917 vcpu->arch.vrr[i] = regs->vrr[i];
918 vcpu->arch.ibr[i] = regs->ibr[i];
919 vcpu->arch.dbr[i] = regs->dbr[i];
920 }
921 for (i = 0; i < 4; i++)
922 vcpu->arch.insvc[i] = regs->insvc[i];
923 RESTORE_REGS(xtp);
924 RESTORE_REGS(metaphysical_rr0);
925 RESTORE_REGS(metaphysical_rr4);
926 RESTORE_REGS(metaphysical_saved_rr0);
927 RESTORE_REGS(metaphysical_saved_rr4);
928 RESTORE_REGS(fp_psr);
929 RESTORE_REGS(saved_gp);
930
931 vcpu->arch.irq_new_pending = 1;
932 vcpu->arch.itc_offset = regs->saved_itc - ia64_getreg(_IA64_REG_AR_ITC);
933 set_bit(KVM_REQ_RESUME, &vcpu->requests);
934
935 vcpu_put(vcpu);
042b26ed
JS
936
937 return 0;
b024b793
XZ
938}
939
940long kvm_arch_vm_ioctl(struct file *filp,
941 unsigned int ioctl, unsigned long arg)
942{
943 struct kvm *kvm = filp->private_data;
944 void __user *argp = (void __user *)arg;
945 int r = -EINVAL;
946
947 switch (ioctl) {
948 case KVM_SET_MEMORY_REGION: {
949 struct kvm_memory_region kvm_mem;
950 struct kvm_userspace_memory_region kvm_userspace_mem;
951
952 r = -EFAULT;
953 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
954 goto out;
955 kvm_userspace_mem.slot = kvm_mem.slot;
956 kvm_userspace_mem.flags = kvm_mem.flags;
957 kvm_userspace_mem.guest_phys_addr =
958 kvm_mem.guest_phys_addr;
959 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
960 r = kvm_vm_ioctl_set_memory_region(kvm,
961 &kvm_userspace_mem, 0);
962 if (r)
963 goto out;
964 break;
965 }
966 case KVM_CREATE_IRQCHIP:
967 r = -EFAULT;
968 r = kvm_ioapic_init(kvm);
969 if (r)
970 goto out;
399ec807
AK
971 r = kvm_setup_default_irq_routing(kvm);
972 if (r) {
973 kfree(kvm->arch.vioapic);
974 goto out;
975 }
b024b793 976 break;
4925663a 977 case KVM_IRQ_LINE_STATUS:
b024b793
XZ
978 case KVM_IRQ_LINE: {
979 struct kvm_irq_level irq_event;
980
981 r = -EFAULT;
982 if (copy_from_user(&irq_event, argp, sizeof irq_event))
983 goto out;
984 if (irqchip_in_kernel(kvm)) {
4925663a 985 __s32 status;
b024b793 986 mutex_lock(&kvm->lock);
4925663a 987 status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
5550af4d 988 irq_event.irq, irq_event.level);
b024b793 989 mutex_unlock(&kvm->lock);
4925663a
GN
990 if (ioctl == KVM_IRQ_LINE_STATUS) {
991 irq_event.status = status;
992 if (copy_to_user(argp, &irq_event,
993 sizeof irq_event))
994 goto out;
995 }
b024b793
XZ
996 r = 0;
997 }
998 break;
999 }
1000 case KVM_GET_IRQCHIP: {
1001 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
1002 struct kvm_irqchip chip;
1003
1004 r = -EFAULT;
1005 if (copy_from_user(&chip, argp, sizeof chip))
1006 goto out;
1007 r = -ENXIO;
1008 if (!irqchip_in_kernel(kvm))
1009 goto out;
1010 r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
1011 if (r)
1012 goto out;
1013 r = -EFAULT;
1014 if (copy_to_user(argp, &chip, sizeof chip))
1015 goto out;
1016 r = 0;
1017 break;
1018 }
1019 case KVM_SET_IRQCHIP: {
1020 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
1021 struct kvm_irqchip chip;
1022
1023 r = -EFAULT;
1024 if (copy_from_user(&chip, argp, sizeof chip))
1025 goto out;
1026 r = -ENXIO;
1027 if (!irqchip_in_kernel(kvm))
1028 goto out;
1029 r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
1030 if (r)
1031 goto out;
1032 r = 0;
1033 break;
1034 }
1035 default:
1036 ;
1037 }
1038out:
1039 return r;
1040}
1041
1042int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1043 struct kvm_sregs *sregs)
1044{
1045 return -EINVAL;
1046}
1047
1048int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1049 struct kvm_sregs *sregs)
1050{
1051 return -EINVAL;
1052
1053}
1054int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1055 struct kvm_translation *tr)
1056{
1057
1058 return -EINVAL;
1059}
1060
1061static int kvm_alloc_vmm_area(void)
1062{
1063 if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) {
1064 kvm_vmm_base = __get_free_pages(GFP_KERNEL,
1065 get_order(KVM_VMM_SIZE));
1066 if (!kvm_vmm_base)
1067 return -ENOMEM;
1068
1069 memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
1070 kvm_vm_buffer = kvm_vmm_base + VMM_SIZE;
1071
1072 printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n",
1073 kvm_vmm_base, kvm_vm_buffer);
1074 }
1075
1076 return 0;
1077}
1078
1079static void kvm_free_vmm_area(void)
1080{
1081 if (kvm_vmm_base) {
1082 /*Zero this area before free to avoid bits leak!!*/
1083 memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
1084 free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE));
1085 kvm_vmm_base = 0;
1086 kvm_vm_buffer = 0;
1087 kvm_vsa_base = 0;
1088 }
1089}
1090
b024b793
XZ
1091static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1092{
1093}
1094
1095static int vti_init_vpd(struct kvm_vcpu *vcpu)
1096{
1097 int i;
1098 union cpuid3_t cpuid3;
1099 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
1100
1101 if (IS_ERR(vpd))
1102 return PTR_ERR(vpd);
1103
1104 /* CPUID init */
1105 for (i = 0; i < 5; i++)
1106 vpd->vcpuid[i] = ia64_get_cpuid(i);
1107
1108 /* Limit the CPUID number to 5 */
1109 cpuid3.value = vpd->vcpuid[3];
1110 cpuid3.number = 4; /* 5 - 1 */
1111 vpd->vcpuid[3] = cpuid3.value;
1112
1113 /*Set vac and vdc fields*/
1114 vpd->vac.a_from_int_cr = 1;
1115 vpd->vac.a_to_int_cr = 1;
1116 vpd->vac.a_from_psr = 1;
1117 vpd->vac.a_from_cpuid = 1;
1118 vpd->vac.a_cover = 1;
1119 vpd->vac.a_bsw = 1;
1120 vpd->vac.a_int = 1;
1121 vpd->vdc.d_vmsw = 1;
1122
1123 /*Set virtual buffer*/
1124 vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE;
1125
1126 return 0;
1127}
1128
1129static int vti_create_vp(struct kvm_vcpu *vcpu)
1130{
1131 long ret;
1132 struct vpd *vpd = vcpu->arch.vpd;
1133 unsigned long vmm_ivt;
1134
1135 vmm_ivt = kvm_vmm_info->vmm_ivt;
1136
1137 printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt);
1138
1139 ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0);
1140
1141 if (ret) {
1142 printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n");
1143 return -EINVAL;
1144 }
1145 return 0;
1146}
1147
1148static void init_ptce_info(struct kvm_vcpu *vcpu)
1149{
1150 ia64_ptce_info_t ptce = {0};
1151
1152 ia64_get_ptce(&ptce);
1153 vcpu->arch.ptce_base = ptce.base;
1154 vcpu->arch.ptce_count[0] = ptce.count[0];
1155 vcpu->arch.ptce_count[1] = ptce.count[1];
1156 vcpu->arch.ptce_stride[0] = ptce.stride[0];
1157 vcpu->arch.ptce_stride[1] = ptce.stride[1];
1158}
1159
1160static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
1161{
1162 struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
1163
1164 if (hrtimer_cancel(p_ht))
18dd36af 1165 hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS);
b024b793
XZ
1166}
1167
1168static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
1169{
1170 struct kvm_vcpu *vcpu;
1171 wait_queue_head_t *q;
1172
1173 vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
decc9016
XZ
1174 q = &vcpu->wq;
1175
a4535290 1176 if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
b024b793
XZ
1177 goto out;
1178
decc9016 1179 if (waitqueue_active(q))
b024b793 1180 wake_up_interruptible(q);
decc9016 1181
b024b793 1182out:
decc9016 1183 vcpu->arch.timer_fired = 1;
b024b793
XZ
1184 vcpu->arch.timer_check = 1;
1185 return HRTIMER_NORESTART;
1186}
1187
1188#define PALE_RESET_ENTRY 0x80000000ffffffb0UL
1189
1190int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1191{
1192 struct kvm_vcpu *v;
1193 int r;
1194 int i;
1195 long itc_offset;
1196 struct kvm *kvm = vcpu->kvm;
1197 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1198
1199 union context *p_ctx = &vcpu->arch.guest;
1200 struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);
1201
1202 /*Init vcpu context for first run.*/
1203 if (IS_ERR(vmm_vcpu))
1204 return PTR_ERR(vmm_vcpu);
1205
1206 if (vcpu->vcpu_id == 0) {
a4535290 1207 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
b024b793
XZ
1208
1209 /*Set entry address for first run.*/
1210 regs->cr_iip = PALE_RESET_ENTRY;
1211
a917f7af 1212 /*Initialize itc offset for vcpus*/
b024b793 1213 itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
934d534f 1214 for (i = 0; i < kvm->arch.online_vcpus; i++) {
a917f7af
XZ
1215 v = (struct kvm_vcpu *)((char *)vcpu +
1216 sizeof(struct kvm_vcpu_data) * i);
b024b793
XZ
1217 v->arch.itc_offset = itc_offset;
1218 v->arch.last_itc = 0;
1219 }
1220 } else
a4535290 1221 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
b024b793
XZ
1222
1223 r = -ENOMEM;
1224 vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
1225 if (!vcpu->arch.apic)
1226 goto out;
1227 vcpu->arch.apic->vcpu = vcpu;
1228
1229 p_ctx->gr[1] = 0;
a917f7af 1230 p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET);
b024b793
XZ
1231 p_ctx->gr[13] = (unsigned long)vmm_vcpu;
1232 p_ctx->psr = 0x1008522000UL;
1233 p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
1234 p_ctx->caller_unat = 0;
1235 p_ctx->pr = 0x0;
1236 p_ctx->ar[36] = 0x0; /*unat*/
1237 p_ctx->ar[19] = 0x0; /*rnat*/
1238 p_ctx->ar[18] = (unsigned long)vmm_vcpu +
1239 ((sizeof(struct kvm_vcpu)+15) & ~15);
1240 p_ctx->ar[64] = 0x0; /*pfs*/
1241 p_ctx->cr[0] = 0x7e04UL;
1242 p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt;
1243 p_ctx->cr[8] = 0x3c;
1244
1245 /*Initialize region registers*/
1246 p_ctx->rr[0] = 0x30;
1247 p_ctx->rr[1] = 0x30;
1248 p_ctx->rr[2] = 0x30;
1249 p_ctx->rr[3] = 0x30;
1250 p_ctx->rr[4] = 0x30;
1251 p_ctx->rr[5] = 0x30;
1252 p_ctx->rr[7] = 0x30;
1253
1254 /*Initialize branch register 0*/
1255 p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;
1256
1257 vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
1258 vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
1259 vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;
1260
1261 hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1262 vcpu->arch.hlt_timer.function = hlt_timer_fn;
1263
1264 vcpu->arch.last_run_cpu = -1;
a917f7af 1265 vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id);
b024b793
XZ
1266 vcpu->arch.vsa_base = kvm_vsa_base;
1267 vcpu->arch.__gp = kvm_vmm_gp;
1268 vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
a917f7af
XZ
1269 vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id);
1270 vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id);
b024b793
XZ
1271 init_ptce_info(vcpu);
1272
1273 r = 0;
1274out:
1275 return r;
1276}
1277
1278static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
1279{
1280 unsigned long psr;
1281 int r;
1282
1283 local_irq_save(psr);
1284 r = kvm_insert_vmm_mapping(vcpu);
1285 if (r)
1286 goto fail;
1287 r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
1288 if (r)
1289 goto fail;
1290
1291 r = vti_init_vpd(vcpu);
1292 if (r) {
1293 printk(KERN_DEBUG"kvm: vpd init error!!\n");
1294 goto uninit;
1295 }
1296
1297 r = vti_create_vp(vcpu);
1298 if (r)
1299 goto uninit;
1300
1301 kvm_purge_vmm_mapping(vcpu);
1302 local_irq_restore(psr);
1303
1304 return 0;
1305uninit:
1306 kvm_vcpu_uninit(vcpu);
1307fail:
cab7a1ee 1308 local_irq_restore(psr);
b024b793
XZ
1309 return r;
1310}
1311
1312struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1313 unsigned int id)
1314{
1315 struct kvm_vcpu *vcpu;
1316 unsigned long vm_base = kvm->arch.vm_base;
1317 int r;
1318 int cpu;
1319
a917f7af
XZ
1320 BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2);
1321
1322 r = -EINVAL;
1323 if (id >= KVM_MAX_VCPUS) {
1324 printk(KERN_ERR"kvm: Can't configure vcpus > %ld",
1325 KVM_MAX_VCPUS);
1326 goto fail;
1327 }
1328
b024b793
XZ
1329 r = -ENOMEM;
1330 if (!vm_base) {
1331 printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
1332 goto fail;
1333 }
a917f7af
XZ
1334 vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data,
1335 vcpu_data[id].vcpu_struct));
b024b793
XZ
1336 vcpu->kvm = kvm;
1337
1338 cpu = get_cpu();
1339 vti_vcpu_load(vcpu, cpu);
1340 r = vti_vcpu_setup(vcpu, id);
1341 put_cpu();
1342
1343 if (r) {
1344 printk(KERN_DEBUG"kvm: vcpu_setup error!!\n");
1345 goto fail;
1346 }
1347
934d534f
JS
1348 kvm->arch.online_vcpus++;
1349
b024b793
XZ
1350 return vcpu;
1351fail:
1352 return ERR_PTR(r);
1353}
1354
1355int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1356{
1357 return 0;
1358}
1359
1360int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1361{
1362 return -EINVAL;
1363}
1364
1365int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1366{
1367 return -EINVAL;
1368}
1369
d0bfb940
JK
1370int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1371 struct kvm_guest_debug *dbg)
b024b793
XZ
1372{
1373 return -EINVAL;
1374}
1375
1376static void free_kvm(struct kvm *kvm)
1377{
1378 unsigned long vm_base = kvm->arch.vm_base;
1379
1380 if (vm_base) {
1381 memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
1382 free_pages(vm_base, get_order(KVM_VM_DATA_SIZE));
1383 }
1384
1385}
1386
1387static void kvm_release_vm_pages(struct kvm *kvm)
1388{
1389 struct kvm_memory_slot *memslot;
1390 int i, j;
1391 unsigned long base_gfn;
1392
1393 for (i = 0; i < kvm->nmemslots; i++) {
1394 memslot = &kvm->memslots[i];
1395 base_gfn = memslot->base_gfn;
1396
1397 for (j = 0; j < memslot->npages; j++) {
1398 if (memslot->rmap[j])
1399 put_page((struct page *)memslot->rmap[j]);
1400 }
1401 }
1402}
1403
ad8ba2cd
SY
1404void kvm_arch_sync_events(struct kvm *kvm)
1405{
1406}
1407
b024b793
XZ
1408void kvm_arch_destroy_vm(struct kvm *kvm)
1409{
2381ad24
XZ
1410 kvm_iommu_unmap_guest(kvm);
1411#ifdef KVM_CAP_DEVICE_ASSIGNMENT
1412 kvm_free_all_assigned_devices(kvm);
1413#endif
b024b793
XZ
1414 kfree(kvm->arch.vioapic);
1415 kvm_release_vm_pages(kvm);
1416 kvm_free_physmem(kvm);
1417 free_kvm(kvm);
1418}
1419
1420void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1421{
1422}
1423
1424void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1425{
1426 if (cpu != vcpu->cpu) {
1427 vcpu->cpu = cpu;
1428 if (vcpu->arch.ht_active)
1429 kvm_migrate_hlt_timer(vcpu);
1430 }
1431}
1432
1433#define SAVE_REGS(_x) regs->_x = vcpu->arch._x
1434
1435int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1436{
b024b793 1437 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
042b26ed
JS
1438 int i;
1439
b024b793
XZ
1440 vcpu_load(vcpu);
1441
1442 for (i = 0; i < 16; i++) {
1443 regs->vpd.vgr[i] = vpd->vgr[i];
1444 regs->vpd.vbgr[i] = vpd->vbgr[i];
1445 }
1446 for (i = 0; i < 128; i++)
1447 regs->vpd.vcr[i] = vpd->vcr[i];
1448 regs->vpd.vhpi = vpd->vhpi;
1449 regs->vpd.vnat = vpd->vnat;
1450 regs->vpd.vbnat = vpd->vbnat;
1451 regs->vpd.vpsr = vpd->vpsr;
1452 regs->vpd.vpr = vpd->vpr;
1453
042b26ed
JS
1454 memcpy(&regs->saved_guest, &vcpu->arch.guest, sizeof(union context));
1455
b024b793
XZ
1456 SAVE_REGS(mp_state);
1457 SAVE_REGS(vmm_rr);
1458 memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
1459 memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS);
1460 SAVE_REGS(itr_regions);
1461 SAVE_REGS(dtr_regions);
1462 SAVE_REGS(tc_regions);
1463 SAVE_REGS(irq_check);
1464 SAVE_REGS(itc_check);
1465 SAVE_REGS(timer_check);
1466 SAVE_REGS(timer_pending);
1467 SAVE_REGS(last_itc);
1468 for (i = 0; i < 8; i++) {
1469 regs->vrr[i] = vcpu->arch.vrr[i];
1470 regs->ibr[i] = vcpu->arch.ibr[i];
1471 regs->dbr[i] = vcpu->arch.dbr[i];
1472 }
1473 for (i = 0; i < 4; i++)
1474 regs->insvc[i] = vcpu->arch.insvc[i];
1475 regs->saved_itc = vcpu->arch.itc_offset + ia64_getreg(_IA64_REG_AR_ITC);
1476 SAVE_REGS(xtp);
1477 SAVE_REGS(metaphysical_rr0);
1478 SAVE_REGS(metaphysical_rr4);
1479 SAVE_REGS(metaphysical_saved_rr0);
1480 SAVE_REGS(metaphysical_saved_rr4);
1481 SAVE_REGS(fp_psr);
1482 SAVE_REGS(saved_gp);
042b26ed 1483
b024b793 1484 vcpu_put(vcpu);
042b26ed 1485 return 0;
b024b793
XZ
1486}
1487
e9a999fe
JS
1488int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu,
1489 struct kvm_ia64_vcpu_stack *stack)
1490{
1491 memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack));
1492 return 0;
1493}
1494
1495int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu,
1496 struct kvm_ia64_vcpu_stack *stack)
1497{
1498 memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu),
1499 sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu));
1500
1501 vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data;
1502 return 0;
1503}
1504
b024b793
XZ
1505void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
1506{
1507
1508 hrtimer_cancel(&vcpu->arch.hlt_timer);
1509 kfree(vcpu->arch.apic);
1510}
1511
1512
1513long kvm_arch_vcpu_ioctl(struct file *filp,
e9a999fe 1514 unsigned int ioctl, unsigned long arg)
b024b793 1515{
e9a999fe
JS
1516 struct kvm_vcpu *vcpu = filp->private_data;
1517 void __user *argp = (void __user *)arg;
1518 struct kvm_ia64_vcpu_stack *stack = NULL;
1519 long r;
1520
1521 switch (ioctl) {
1522 case KVM_IA64_VCPU_GET_STACK: {
1523 struct kvm_ia64_vcpu_stack __user *user_stack;
1524 void __user *first_p = argp;
1525
1526 r = -EFAULT;
1527 if (copy_from_user(&user_stack, first_p, sizeof(void *)))
1528 goto out;
1529
1530 if (!access_ok(VERIFY_WRITE, user_stack,
1531 sizeof(struct kvm_ia64_vcpu_stack))) {
1532 printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: "
1533 "Illegal user destination address for stack\n");
1534 goto out;
1535 }
1536 stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
1537 if (!stack) {
1538 r = -ENOMEM;
1539 goto out;
1540 }
1541
1542 r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack);
1543 if (r)
1544 goto out;
1545
1546 if (copy_to_user(user_stack, stack,
1547 sizeof(struct kvm_ia64_vcpu_stack)))
1548 goto out;
1549
1550 break;
1551 }
1552 case KVM_IA64_VCPU_SET_STACK: {
1553 struct kvm_ia64_vcpu_stack __user *user_stack;
1554 void __user *first_p = argp;
1555
1556 r = -EFAULT;
1557 if (copy_from_user(&user_stack, first_p, sizeof(void *)))
1558 goto out;
1559
1560 if (!access_ok(VERIFY_READ, user_stack,
1561 sizeof(struct kvm_ia64_vcpu_stack))) {
1562 printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: "
1563 "Illegal user address for stack\n");
1564 goto out;
1565 }
1566 stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
1567 if (!stack) {
1568 r = -ENOMEM;
1569 goto out;
1570 }
1571 if (copy_from_user(stack, user_stack,
1572 sizeof(struct kvm_ia64_vcpu_stack)))
1573 goto out;
1574
1575 r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack);
1576 break;
1577 }
1578
1579 default:
1580 r = -EINVAL;
1581 }
1582
1583out:
1584 kfree(stack);
1585 return r;
b024b793
XZ
1586}
1587
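/*
 * Populate the guest physical-to-machine (P2M) table for the new slot:
 * ordinary RAM pages are pinned via gfn_to_pfn() and mapped cacheable,
 * while MMIO frames are entered as uncacheable GPFN_PHYS_MMIO mappings.
 */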
1588int kvm_arch_set_memory_region(struct kvm *kvm,
1589 struct kvm_userspace_memory_region *mem,
1590 struct kvm_memory_slot old,
1591 int user_alloc)
1592{
1593 unsigned long i;
1cbea809 1594 unsigned long pfn;
b024b793
XZ
1595 int npages = mem->memory_size >> PAGE_SHIFT;
1596 struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
1597 unsigned long base_gfn = memslot->base_gfn;
1598
a917f7af
XZ
1599 if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
1600 return -ENOMEM;
1601
b024b793 1602 for (i = 0; i < npages; i++) {
1cbea809
XZ
1603 pfn = gfn_to_pfn(kvm, base_gfn + i);
1604 if (!kvm_is_mmio_pfn(pfn)) {
1605 kvm_set_pmt_entry(kvm, base_gfn + i,
1606 pfn << PAGE_SHIFT,
b010eb51 1607 _PAGE_AR_RWX | _PAGE_MA_WB);
1cbea809
XZ
1608 memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
1609 } else {
1610 kvm_set_pmt_entry(kvm, base_gfn + i,
b010eb51 1611 GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
1cbea809
XZ
1612 _PAGE_MA_UC);
1613 memslot->rmap[i] = 0;
1614 }
b024b793
XZ
1615 }
1616
1617 return 0;
1618}
1619
34d4cb8f
MT
1620void kvm_arch_flush_shadow(struct kvm *kvm)
1621{
1622}
b024b793
XZ
1623
1624long kvm_arch_dev_ioctl(struct file *filp,
e9a999fe 1625 unsigned int ioctl, unsigned long arg)
b024b793
XZ
1626{
1627 return -EINVAL;
1628}
1629
1630void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1631{
1632 kvm_vcpu_uninit(vcpu);
1633}
1634
1635static int vti_cpu_has_kvm_support(void)
1636{
1637 long avail = 1, status = 1, control = 1;
1638 long ret;
1639
1640 ret = ia64_pal_proc_get_features(&avail, &status, &control, 0);
1641 if (ret)
1642 goto out;
1643
1644 if (!(avail & PAL_PROC_VM_BIT))
1645 goto out;
1646
1647 printk(KERN_DEBUG"kvm: Hardware Supports VT\n");
1648
1649 ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info);
1650 if (ret)
1651 goto out;
1652 printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size);
1653
1654 if (!(vp_env_info & VP_OPCODE)) {
1655 printk(KERN_WARNING"kvm: No opcode ability on hardware, "
1656 "vm_env_info:0x%lx\n", vp_env_info);
1657 }
1658
1659 return 1;
1660out:
1661 return 0;
1662}
1663
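/*
 * Copy the VMM module text into the pinned kvm_vmm_base area and rewrite
 * its function descriptors (ip/gp) so the entry points resolve inside the
 * relocated image at KVM_VMM_BASE.
 */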
1664static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
1665 struct module *module)
1666{
1667 unsigned long module_base;
1668 unsigned long vmm_size;
1669
1670 unsigned long vmm_offset, func_offset, fdesc_offset;
1671 struct fdesc *p_fdesc;
1672
1673 BUG_ON(!module);
1674
1675 if (!kvm_vmm_base) {
1676 printk("kvm: kvm area hasn't been initilized yet!!\n");
1677 return -EFAULT;
1678 }
1679
1680 /*Calculate new position of relocated vmm module.*/
1681 module_base = (unsigned long)module->module_core;
1682 vmm_size = module->core_size;
1683 if (unlikely(vmm_size > KVM_VMM_SIZE))
1684 return -EFAULT;
1685
1686 memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
1687 kvm_flush_icache(kvm_vmm_base, vmm_size);
1688
1689 /*Recalculate kvm_vmm_info based on new VMM*/
1690 vmm_offset = vmm_info->vmm_ivt - module_base;
1691 kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset;
1692 printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n",
1693 kvm_vmm_info->vmm_ivt);
1694
1695 fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base;
1696 kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE +
1697 fdesc_offset);
1698 func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base;
1699 p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
1700 p_fdesc->ip = KVM_VMM_BASE + func_offset;
1701 p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base);
1702
1703 printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n",
1704 KVM_VMM_BASE+func_offset);
1705
1706 fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base;
1707 kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE +
1708 fdesc_offset);
1709 func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base;
1710 p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
1711 p_fdesc->ip = KVM_VMM_BASE + func_offset;
1712 p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);
1713
1714 kvm_vmm_gp = p_fdesc->gp;
1715
1716 printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n",
1717 kvm_vmm_info->vmm_entry);
1718 printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n",
1719 KVM_VMM_BASE + func_offset);
1720
1721 return 0;
1722}
1723
1724int kvm_arch_init(void *opaque)
1725{
1726 int r;
1727 struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque;
1728
1729 if (!vti_cpu_has_kvm_support()) {
1730 printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n");
1731 r = -EOPNOTSUPP;
1732 goto out;
1733 }
1734
1735 if (kvm_vmm_info) {
1736 printk(KERN_ERR "kvm: Already loaded VMM module!\n");
1737 r = -EEXIST;
1738 goto out;
1739 }
1740
1741 r = -ENOMEM;
1742 kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL);
1743 if (!kvm_vmm_info)
1744 goto out;
1745
1746 if (kvm_alloc_vmm_area())
1747 goto out_free0;
1748
1749 r = kvm_relocate_vmm(vmm_info, vmm_info->module);
1750 if (r)
1751 goto out_free1;
1752
1753 return 0;
1754
1755out_free1:
1756 kvm_free_vmm_area();
1757out_free0:
1758 kfree(kvm_vmm_info);
1759out:
1760 return r;
1761}
1762
1763void kvm_arch_exit(void)
1764{
1765 kvm_free_vmm_area();
1766 kfree(kvm_vmm_info);
1767 kvm_vmm_info = NULL;
1768}
1769
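/*
 * The VMM keeps its own dirty bitmap in the shared VM data area; copy the
 * bits for this memslot into memslot->dirty_bitmap and clear the source so
 * the next pass only reports newly dirtied pages.
 */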
1770static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
1771 struct kvm_dirty_log *log)
1772{
1773 struct kvm_memory_slot *memslot;
1774 int r, i;
1775 long n, base;
a917f7af
XZ
1776 unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
1777 offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
b024b793
XZ
1778
1779 r = -EINVAL;
1780 if (log->slot >= KVM_MEMORY_SLOTS)
1781 goto out;
1782
1783 memslot = &kvm->memslots[log->slot];
1784 r = -ENOENT;
1785 if (!memslot->dirty_bitmap)
1786 goto out;
1787
1788 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1789 base = memslot->base_gfn / BITS_PER_LONG;
1790
1791 for (i = 0; i < n/sizeof(long); ++i) {
1792 memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
1793 dirty_bitmap[base + i] = 0;
1794 }
1795 r = 0;
1796out:
1797 return r;
1798}
1799
1800int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1801 struct kvm_dirty_log *log)
1802{
1803 int r;
1804 int n;
1805 struct kvm_memory_slot *memslot;
1806 int is_dirty = 0;
1807
1808 spin_lock(&kvm->arch.dirty_log_lock);
1809
1810 r = kvm_ia64_sync_dirty_log(kvm, log);
1811 if (r)
1812 goto out;
1813
1814 r = kvm_get_dirty_log(kvm, log, &is_dirty);
1815 if (r)
1816 goto out;
1817
1818 /* If nothing is dirty, don't bother messing with page tables. */
1819 if (is_dirty) {
1820 kvm_flush_remote_tlbs(kvm);
1821 memslot = &kvm->memslots[log->slot];
1822 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1823 memset(memslot->dirty_bitmap, 0, n);
1824 }
1825 r = 0;
1826out:
1827 spin_unlock(&kvm->arch.dirty_log_lock);
1828 return r;
1829}
1830
1831int kvm_arch_hardware_setup(void)
1832{
1833 return 0;
1834}
1835
1836void kvm_arch_hardware_unsetup(void)
1837{
1838}
1839
1840static void vcpu_kick_intr(void *info)
1841{
1842#ifdef DEBUG
1843 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
1844 printk(KERN_DEBUG"vcpu_kick_intr %p \n", vcpu);
1845#endif
1846}
1847
1848void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
1849{
1850 int ipi_pcpu = vcpu->cpu;
decc9016 1851 int cpu = get_cpu();
b024b793
XZ
1852
1853 if (waitqueue_active(&vcpu->wq))
1854 wake_up_interruptible(&vcpu->wq);
1855
decc9016 1856 if (vcpu->guest_mode && cpu != ipi_pcpu)
2f73ccab 1857 smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
decc9016 1858 put_cpu();
b024b793
XZ
1859}
1860
58c2dde1 1861int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
b024b793 1862{
58c2dde1 1863 return __apic_accept_irq(vcpu, irq->vector);
b024b793
XZ
1864}
1865
1866int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
1867{
1868 return apic->vcpu->vcpu_id == dest;
1869}
1870
1871int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
1872{
1873 return 0;
1874}
1875
e1035715 1876int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
b024b793 1877{
e1035715 1878 return vcpu1->arch.xtp - vcpu2->arch.xtp;
b024b793
XZ
1879}
1880
343f94fe
GN
1881int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
1882 int short_hand, int dest, int dest_mode)
1883{
58c2dde1 1884 struct kvm_lapic *target = vcpu->arch.apic;
343f94fe
GN
1885 return (dest_mode == 0) ?
1886 kvm_apic_match_physical_addr(target, dest) :
1887 kvm_apic_match_logical_addr(target, dest);
1888}
1889
b024b793
XZ
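/*
 * Scan the 256-bit IRR from the highest 32-bit word down and use fls()
 * to return the highest pending vector, or -1 if none is set.
 */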
1890static int find_highest_bits(int *dat)
1891{
1892 u32 bits, bitnum;
1893 int i;
1894
1895 /* loop for all 256 bits */
1896 for (i = 7; i >= 0 ; i--) {
1897 bits = dat[i];
1898 if (bits) {
1899 bitnum = fls(bits);
1900 return i * 32 + bitnum - 1;
1901 }
1902 }
1903
1904 return -1;
1905}
1906
1907int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
1908{
1909 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
1910
1911 if (vpd->irr[0] & (1UL << NMI_VECTOR))
1912 return NMI_VECTOR;
1913 if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
1914 return ExtINT_VECTOR;
1915
1916 return find_highest_bits((int *)&vpd->irr[0]);
1917}
1918
1919int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
1920{
1921 if (kvm_highest_pending_irq(vcpu) != -1)
1922 return 1;
1923 return 0;
1924}
1925
3d80840d
MT
1926int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
1927{
decc9016 1928 return vcpu->arch.timer_fired;
3d80840d
MT
1929}
1930
b024b793
XZ
1931gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
1932{
1933 return gfn;
1934}
1935
1936int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1937{
a4535290 1938 return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE;
b024b793 1939}
62d9f0db
MT
1940
1941int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1942 struct kvm_mp_state *mp_state)
1943{
8c4b537d
XZ
1944 vcpu_load(vcpu);
1945 mp_state->mp_state = vcpu->arch.mp_state;
1946 vcpu_put(vcpu);
1947 return 0;
1948}
1949
1950static int vcpu_reset(struct kvm_vcpu *vcpu)
1951{
1952 int r;
1953 long psr;
1954 local_irq_save(psr);
1955 r = kvm_insert_vmm_mapping(vcpu);
1956 if (r)
1957 goto fail;
1958
1959 vcpu->arch.launched = 0;
1960 kvm_arch_vcpu_uninit(vcpu);
1961 r = kvm_arch_vcpu_init(vcpu);
1962 if (r)
1963 goto fail;
1964
1965 kvm_purge_vmm_mapping(vcpu);
1966 r = 0;
1967fail:
1968 local_irq_restore(psr);
1969 return r;
62d9f0db
MT
1970}
1971
1972int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1973 struct kvm_mp_state *mp_state)
1974{
8c4b537d
XZ
1975 int r = 0;
1976
1977 vcpu_load(vcpu);
1978 vcpu->arch.mp_state = mp_state->mp_state;
1979 if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
1980 r = vcpu_reset(vcpu);
1981 vcpu_put(vcpu);
1982 return r;
62d9f0db 1983}