/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
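
/*
 * Usage sketch (illustrative, not part of this file): userspace issues
 * this ioctl once on the /dev/kvm file descriptor before creating a VM;
 * the fd handling below is an assumption made for the example.
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	if (ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0) < 0)
 *		perror("KVM_S390_ENABLE_SIE");
 */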

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
		r = 1;
		break;
	default:
		r = 0;
	}
	return r;
}
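
/*
 * Usage sketch (illustrative): userspace probes the capability through
 * the generic KVM_CHECK_EXTENSION ioctl; kvm_fd is assumed as above.
 *
 *	int has_psw = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_PSW);
 *
 * A return of 1 means psw_mask/psw_addr are synced through struct kvm_run.
 */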

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
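
/*
 * Usage sketch (illustrative): a floating interrupt, e.g. for a virtio
 * notification, is injected on the VM file descriptor. vm_fd and the
 * parameter values are assumptions made for the example.
 *
 *	struct kvm_s390_interrupt s390int = {
 *		.type = KVM_S390_INT_VIRTIO,
 *		.parm64 = cookie,
 *	};
 *	ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int);
 */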

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_nokvm;

	rc = -ENOMEM;
	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		goto out_nokvm;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_nosca;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	return kvm;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_nosca:
	kfree(kvm);
out_nokvm:
	return ERR_PTR(rc);
}
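
/*
 * Tearing down a vcpu clears its entry in the system control area (SCA)
 * if it still points at this vcpu's SIE control block, so the hardware
 * no longer references the page that is freed below.
 */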
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
	    (__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	cleanup_srcu_struct(&kvm->srcu);
	kfree(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}
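
/*
 * vcpu load/put swap the host and guest floating point and access
 * registers; masking fpc with FPC_VALID_MASK keeps an invalid guest FP
 * control word from being loaded on the host.
 */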
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* This equals the initial cpu reset in POP (Principles of
	 * Operation), but we don't switch to ESA. */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
	vcpu->arch.sie_block->ecb = 2;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_s390_vcpu_initial_reset(vcpu);
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	vcpu_put(vcpu);
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	vcpu_load(vcpu);
	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	vcpu_put(vcpu);
	return rc;
}
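
/*
 * Note that the initial PSW is staged in the vcpu's kvm_run area; it is
 * transferred into the SIE control block on the next KVM_RUN, see
 * kvm_arch_vcpu_ioctl_run() below.
 */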

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
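
/*
 * One round trip through SIE: handle a pending host machine check,
 * deliver pending guest interrupts, copy gprs 14 and 15 into the SIE
 * control block, run the guest via sie64a(), and copy them back on exit.
 */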
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	vcpu_load(vcpu);

rerun_vcpu:
	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			kvm_s390_vcpu_set_mem(vcpu);

	/* verify that memory has been registered */
	if (!vcpu->arch.sie_block->gmslm) {
		vcpu_put(vcpu);
		VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
		return -EINVAL;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	vcpu->stat.exit_userspace++;
	return rc;
}
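
/* Store into the guest through the vcpu's prefixed (logical) view when
 * prefix is set, or into absolute storage otherwise. */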
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	int rc;

	vcpu_load(vcpu);
	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
	vcpu_put(vcpu);
	return rc;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390int);
	}
	case KVM_S390_STORE_STATUS:
		return kvm_s390_vcpu_store_status(vcpu, arg);
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		if (copy_from_user(&psw, argp, sizeof(psw)))
			return -EFAULT;
		return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
	}
	case KVM_S390_INITIAL_RESET:
		return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
	default:
		;
	}
	return -EINVAL;
}
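
/*
 * Usage sketch (illustrative): after an initial reset, userspace points
 * the vcpu at its boot code through the initial PSW. vcpu_fd is assumed;
 * the mask (64-bit addressing) and entry address are hypothetical values.
 *
 *	psw_t psw = {
 *		.mask = 0x0000000180000000UL,
 *		.addr = 0x10000UL,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
 */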

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot; it has
	   to start at guest physical zero, start at a page boundary in
	   userland, and end at a page boundary. The userland memory may be
	   fragmented into several vmas, and it is fine to mmap() and
	   munmap() ranges within this slot at any time after this call. */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}
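
/*
 * Usage sketch (illustrative): the single allowed slot is registered on
 * the VM file descriptor. vm_fd, guest_size and backing (a page-aligned
 * mmap() region) are assumptions made for the example.
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size = guest_size,
 *		.userspace_addr = (unsigned long) backing,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */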

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int i;
	struct kvm_vcpu *vcpu;

	/* request update of sie control block for all available vcpus */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
	}
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	stfle(facilities, 1);
	facilities[0] &= 0xff00fff3f0700000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);