]> bbs.cooldavid.org Git - net-next-2.6.git/blame - arch/s390/kvm/kvm-s390.c
KVM: modify alias layout in x86s struct kvm_arch
[net-next-2.6.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db
HC
1/*
2 * s390host.c -- hosting zSeries kernel virtual machines
3 *
628eb9b8 4 * Copyright IBM Corp. 2008,2009
b0c632db
HC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
628eb9b8 13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
b0c632db
HC
14 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
ef50f7ac 30#include <asm/system.h>
8f2abe6a 31#include "kvm-s390.h"
b0c632db
HC
32#include "gaccess.h"
33
34#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
35
36struct kvm_stats_debugfs_item debugfs_entries[] = {
37 { "userspace_handled", VCPU_STAT(exit_userspace) },
0eaeafa1 38 { "exit_null", VCPU_STAT(exit_null) },
8f2abe6a
CB
39 { "exit_validity", VCPU_STAT(exit_validity) },
40 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
41 { "exit_external_request", VCPU_STAT(exit_external_request) },
42 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
ba5c1e9b
CO
43 { "exit_instruction", VCPU_STAT(exit_instruction) },
44 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
45 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
f5e10b09 46 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
ba5c1e9b
CO
47 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
48 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
49 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
50 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
51 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
52 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
53 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
54 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
55 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
453423dc
CB
56 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
57 { "instruction_spx", VCPU_STAT(instruction_spx) },
58 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
59 { "instruction_stap", VCPU_STAT(instruction_stap) },
60 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
61 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
62 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
63 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
64 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
5288fbf0
CB
65 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
66 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
67 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
68 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
69 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
70 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
e28acfea 71 { "diagnose_44", VCPU_STAT(diagnose_44) },
b0c632db
HC
72 { NULL }
73};
74
/*
 * Facility list (STFLE result, masked) handed to guests; one DMA page,
 * allocated in kvm_s390_init() and freed in kvm_s390_exit().
 */
static unsigned long long *facilities;
b0c632db
HC
76
/* Section: not file related */

/* Nothing to do: every s390 CPU is virtualization enabled ;-) */
int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
109
110/* Section: device related */
111long kvm_arch_dev_ioctl(struct file *filp,
112 unsigned int ioctl, unsigned long arg)
113{
114 if (ioctl == KVM_S390_ENABLE_SIE)
115 return s390_enable_sie();
116 return -EINVAL;
117}
118
119int kvm_dev_ioctl_check_extension(long ext)
120{
d7b0b5eb
CO
121 int r;
122
2bd0ac4e 123 switch (ext) {
d7b0b5eb
CO
124 case KVM_CAP_S390_PSW:
125 r = 1;
126 break;
2bd0ac4e 127 default:
d7b0b5eb 128 r = 0;
2bd0ac4e 129 }
d7b0b5eb 130 return r;
b0c632db
HC
131}
132
/* Section: vm related */

/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Dirty page logging is not implemented on s390; report success
 * with an empty log.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
142
143long kvm_arch_vm_ioctl(struct file *filp,
144 unsigned int ioctl, unsigned long arg)
145{
146 struct kvm *kvm = filp->private_data;
147 void __user *argp = (void __user *)arg;
148 int r;
149
150 switch (ioctl) {
ba5c1e9b
CO
151 case KVM_S390_INTERRUPT: {
152 struct kvm_s390_interrupt s390int;
153
154 r = -EFAULT;
155 if (copy_from_user(&s390int, argp, sizeof(s390int)))
156 break;
157 r = kvm_s390_inject_vm(kvm, &s390int);
158 break;
159 }
b0c632db 160 default:
367e1319 161 r = -ENOTTY;
b0c632db
HC
162 }
163
164 return r;
165}
166
167struct kvm *kvm_arch_create_vm(void)
168{
169 struct kvm *kvm;
170 int rc;
171 char debug_name[16];
172
173 rc = s390_enable_sie();
174 if (rc)
175 goto out_nokvm;
176
177 rc = -ENOMEM;
178 kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
179 if (!kvm)
180 goto out_nokvm;
181
182 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
183 if (!kvm->arch.sca)
184 goto out_nosca;
185
186 sprintf(debug_name, "kvm-%u", current->pid);
187
188 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
189 if (!kvm->arch.dbf)
190 goto out_nodbf;
191
ba5c1e9b
CO
192 spin_lock_init(&kvm->arch.float_int.lock);
193 INIT_LIST_HEAD(&kvm->arch.float_int.list);
194
b0c632db
HC
195 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
196 VM_EVENT(kvm, 3, "%s", "vm created");
197
b0c632db
HC
198 return kvm;
199out_nodbf:
200 free_page((unsigned long)(kvm->arch.sca));
201out_nosca:
202 kfree(kvm);
203out_nokvm:
204 return ERR_PTR(rc);
205}
206
d329c035
CB
207void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
208{
209 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
abf4a71e
CO
210 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
211 (__u64) vcpu->arch.sie_block)
212 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
213 smp_mb();
d329c035 214 free_page((unsigned long)(vcpu->arch.sie_block));
6692cef3 215 kvm_vcpu_uninit(vcpu);
d329c035
CB
216 kfree(vcpu);
217}
218
219static void kvm_free_vcpus(struct kvm *kvm)
220{
221 unsigned int i;
988a2cae 222 struct kvm_vcpu *vcpu;
d329c035 223
988a2cae
GN
224 kvm_for_each_vcpu(i, vcpu, kvm)
225 kvm_arch_vcpu_destroy(vcpu);
226
227 mutex_lock(&kvm->lock);
228 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
229 kvm->vcpus[i] = NULL;
230
231 atomic_set(&kvm->online_vcpus, 0);
232 mutex_unlock(&kvm->lock);
d329c035
CB
233}
234
/* No architecture-specific events need flushing on s390. */
void kvm_arch_sync_events(struct kvm *kvm)
{
}
238
b0c632db
HC
239void kvm_arch_destroy_vm(struct kvm *kvm)
240{
d329c035 241 kvm_free_vcpus(kvm);
dfdded7c 242 kvm_free_physmem(kvm);
b0c632db 243 free_page((unsigned long)(kvm->arch.sca));
d329c035 244 debug_unregister(kvm->arch.dbf);
b0c632db 245 kfree(kvm);
b0c632db
HC
246}
247
/* Section: vcpu related */

/* All real vcpu setup happens in kvm_arch_vcpu_create()/_setup(). */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing todo */
}
258
259void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
260{
261 save_fp_regs(&vcpu->arch.host_fpregs);
262 save_access_regs(vcpu->arch.host_acrs);
263 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
264 restore_fp_regs(&vcpu->arch.guest_fpregs);
265 restore_access_regs(vcpu->arch.guest_acrs);
b0c632db
HC
266}
267
268void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
269{
270 save_fp_regs(&vcpu->arch.guest_fpregs);
271 save_access_regs(vcpu->arch.guest_acrs);
272 restore_fp_regs(&vcpu->arch.host_fpregs);
273 restore_access_regs(vcpu->arch.host_acrs);
274}
275
276static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
277{
278 /* this equals initial cpu reset in pop, but we don't switch to ESA */
279 vcpu->arch.sie_block->gpsw.mask = 0UL;
280 vcpu->arch.sie_block->gpsw.addr = 0UL;
281 vcpu->arch.sie_block->prefix = 0UL;
282 vcpu->arch.sie_block->ihcpu = 0xffff;
283 vcpu->arch.sie_block->cputm = 0UL;
284 vcpu->arch.sie_block->ckc = 0UL;
285 vcpu->arch.sie_block->todpr = 0;
286 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
287 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
288 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
289 vcpu->arch.guest_fpregs.fpc = 0;
290 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
291 vcpu->arch.sie_block->gbea = 1;
292}
293
294int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
295{
296 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
628eb9b8 297 set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
b0c632db
HC
298 vcpu->arch.sie_block->ecb = 2;
299 vcpu->arch.sie_block->eca = 0xC1002001U;
ef50f7ac 300 vcpu->arch.sie_block->fac = (int) (long) facilities;
ca872302
CB
301 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
302 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
303 (unsigned long) vcpu);
304 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
453423dc 305 get_cpu_id(&vcpu->arch.cpu_id);
92e6ecf3 306 vcpu->arch.cpu_id.version = 0xff;
b0c632db
HC
307 return 0;
308}
309
310struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
311 unsigned int id)
312{
313 struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
314 int rc = -ENOMEM;
315
316 if (!vcpu)
317 goto out_nomem;
318
180c12fb
CB
319 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
320 get_zeroed_page(GFP_KERNEL);
b0c632db
HC
321
322 if (!vcpu->arch.sie_block)
323 goto out_free_cpu;
324
325 vcpu->arch.sie_block->icpua = id;
326 BUG_ON(!kvm->arch.sca);
abf4a71e
CO
327 if (!kvm->arch.sca->cpu[id].sda)
328 kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
b0c632db
HC
329 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
330 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
331
ba5c1e9b
CO
332 spin_lock_init(&vcpu->arch.local_int.lock);
333 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
334 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
b037a4f3 335 spin_lock(&kvm->arch.float_int.lock);
ba5c1e9b
CO
336 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
337 init_waitqueue_head(&vcpu->arch.local_int.wq);
5288fbf0 338 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
b037a4f3 339 spin_unlock(&kvm->arch.float_int.lock);
ba5c1e9b 340
b0c632db
HC
341 rc = kvm_vcpu_init(vcpu, kvm, id);
342 if (rc)
343 goto out_free_cpu;
344 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
345 vcpu->arch.sie_block);
346
b0c632db
HC
347 return vcpu;
348out_free_cpu:
349 kfree(vcpu);
350out_nomem:
351 return ERR_PTR(rc);
352}
353
/* kvm common code refers to this, but never calls it on s390. */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	BUG();
	return 0;
}

/* Handle the KVM_S390_INITIAL_RESET vcpu ioctl. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_s390_vcpu_initial_reset(vcpu);
	vcpu_put(vcpu);
	return 0;
}
368
369int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
370{
371 vcpu_load(vcpu);
372 memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
373 vcpu_put(vcpu);
374 return 0;
375}
376
377int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
378{
379 vcpu_load(vcpu);
380 memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
381 vcpu_put(vcpu);
382 return 0;
383}
384
385int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
386 struct kvm_sregs *sregs)
387{
388 vcpu_load(vcpu);
389 memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
390 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
391 vcpu_put(vcpu);
392 return 0;
393}
394
395int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
396 struct kvm_sregs *sregs)
397{
398 vcpu_load(vcpu);
399 memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
400 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
401 vcpu_put(vcpu);
402 return 0;
403}
404
405int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
406{
407 vcpu_load(vcpu);
408 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
409 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
410 vcpu_put(vcpu);
411 return 0;
412}
413
414int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
415{
416 vcpu_load(vcpu);
417 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
418 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
419 vcpu_put(vcpu);
420 return 0;
421}
422
423static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
424{
425 int rc = 0;
426
427 vcpu_load(vcpu);
428 if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
429 rc = -EBUSY;
d7b0b5eb
CO
430 else {
431 vcpu->run->psw_mask = psw.mask;
432 vcpu->run->psw_addr = psw.addr;
433 }
b0c632db
HC
434 vcpu_put(vcpu);
435 return rc;
436}
437
438int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
439 struct kvm_translation *tr)
440{
441 return -EINVAL; /* not implemented yet */
442}
443
d0bfb940
JK
444int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
445 struct kvm_guest_debug *dbg)
b0c632db
HC
446{
447 return -EINVAL; /* not implemented yet */
448}
449
62d9f0db
MT
450int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
451 struct kvm_mp_state *mp_state)
452{
453 return -EINVAL; /* not implemented yet */
454}
455
456int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
457 struct kvm_mp_state *mp_state)
458{
459 return -EINVAL; /* not implemented yet */
460}
461
b0c632db
HC
462static void __vcpu_run(struct kvm_vcpu *vcpu)
463{
464 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
465
466 if (need_resched())
467 schedule();
468
71cde587
CB
469 if (test_thread_flag(TIF_MCCK_PENDING))
470 s390_handle_mcck();
471
0ff31867
CO
472 kvm_s390_deliver_pending_interrupts(vcpu);
473
b0c632db
HC
474 vcpu->arch.sie_block->icptcode = 0;
475 local_irq_disable();
476 kvm_guest_enter();
477 local_irq_enable();
478 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
479 atomic_read(&vcpu->arch.sie_block->cpuflags));
1f0d0f09
CO
480 if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
481 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
482 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
483 }
b0c632db
HC
484 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
485 vcpu->arch.sie_block->icptcode);
486 local_irq_disable();
487 kvm_guest_exit();
488 local_irq_enable();
489
490 memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
491}
492
493int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
494{
8f2abe6a 495 int rc;
b0c632db
HC
496 sigset_t sigsaved;
497
498 vcpu_load(vcpu);
499
9ace903d 500rerun_vcpu:
628eb9b8
CE
501 if (vcpu->requests)
502 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
503 kvm_s390_vcpu_set_mem(vcpu);
504
51e4d5ab 505 /* verify, that memory has been registered */
628eb9b8 506 if (!vcpu->arch.sie_block->gmslm) {
51e4d5ab 507 vcpu_put(vcpu);
628eb9b8 508 VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
51e4d5ab
CO
509 return -EINVAL;
510 }
511
b0c632db
HC
512 if (vcpu->sigset_active)
513 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
514
515 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
516
ba5c1e9b
CO
517 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
518
8f2abe6a
CB
519 switch (kvm_run->exit_reason) {
520 case KVM_EXIT_S390_SIEIC:
8f2abe6a 521 case KVM_EXIT_UNKNOWN:
9ace903d 522 case KVM_EXIT_INTR:
8f2abe6a
CB
523 case KVM_EXIT_S390_RESET:
524 break;
525 default:
526 BUG();
527 }
528
d7b0b5eb
CO
529 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
530 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
531
dab4079d 532 might_fault();
8f2abe6a
CB
533
534 do {
535 __vcpu_run(vcpu);
8f2abe6a
CB
536 rc = kvm_handle_sie_intercept(vcpu);
537 } while (!signal_pending(current) && !rc);
538
9ace903d
CE
539 if (rc == SIE_INTERCEPT_RERUNVCPU)
540 goto rerun_vcpu;
541
b1d16c49
CE
542 if (signal_pending(current) && !rc) {
543 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 544 rc = -EINTR;
b1d16c49 545 }
8f2abe6a 546
b8e660b8 547 if (rc == -EOPNOTSUPP) {
8f2abe6a
CB
548 /* intercept cannot be handled in-kernel, prepare kvm-run */
549 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
550 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
8f2abe6a
CB
551 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
552 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
553 rc = 0;
554 }
555
556 if (rc == -EREMOTE) {
557 /* intercept was handled, but userspace support is needed
558 * kvm_run has been prepared by the handler */
559 rc = 0;
560 }
b0c632db 561
d7b0b5eb
CO
562 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
563 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
564
b0c632db
HC
565 if (vcpu->sigset_active)
566 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
567
568 vcpu_put(vcpu);
569
570 vcpu->stat.exit_userspace++;
7e8e6ab4 571 return rc;
b0c632db
HC
572}
573
574static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
575 unsigned long n, int prefix)
576{
577 if (prefix)
578 return copy_to_guest(vcpu, guestdest, from, n);
579 else
580 return copy_to_guest_absolute(vcpu, guestdest, from, n);
581}
582
583/*
584 * store status at address
585 * we use have two special cases:
586 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
587 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
588 */
589int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
590{
591 const unsigned char archmode = 1;
592 int prefix;
593
594 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
595 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
596 return -EFAULT;
597 addr = SAVE_AREA_BASE;
598 prefix = 0;
599 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
600 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
601 return -EFAULT;
602 addr = SAVE_AREA_BASE;
603 prefix = 1;
604 } else
605 prefix = 0;
606
f64ca217 607 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
b0c632db
HC
608 vcpu->arch.guest_fpregs.fprs, 128, prefix))
609 return -EFAULT;
610
f64ca217 611 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
b0c632db
HC
612 vcpu->arch.guest_gprs, 128, prefix))
613 return -EFAULT;
614
f64ca217 615 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
b0c632db
HC
616 &vcpu->arch.sie_block->gpsw, 16, prefix))
617 return -EFAULT;
618
f64ca217 619 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
b0c632db
HC
620 &vcpu->arch.sie_block->prefix, 4, prefix))
621 return -EFAULT;
622
623 if (__guestcopy(vcpu,
f64ca217 624 addr + offsetof(struct save_area, fp_ctrl_reg),
b0c632db
HC
625 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
626 return -EFAULT;
627
f64ca217 628 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
b0c632db
HC
629 &vcpu->arch.sie_block->todpr, 4, prefix))
630 return -EFAULT;
631
f64ca217 632 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
b0c632db
HC
633 &vcpu->arch.sie_block->cputm, 8, prefix))
634 return -EFAULT;
635
f64ca217 636 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
b0c632db
HC
637 &vcpu->arch.sie_block->ckc, 8, prefix))
638 return -EFAULT;
639
f64ca217 640 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
b0c632db
HC
641 &vcpu->arch.guest_acrs, 64, prefix))
642 return -EFAULT;
643
644 if (__guestcopy(vcpu,
f64ca217 645 addr + offsetof(struct save_area, ctrl_regs),
b0c632db
HC
646 &vcpu->arch.sie_block->gcr, 128, prefix))
647 return -EFAULT;
648 return 0;
649}
650
/* vcpu_load/put wrapper around __kvm_s390_vcpu_store_status(). */
static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	int rc;

	vcpu_load(vcpu);
	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
	vcpu_put(vcpu);
	return rc;
}
660
661long kvm_arch_vcpu_ioctl(struct file *filp,
662 unsigned int ioctl, unsigned long arg)
663{
664 struct kvm_vcpu *vcpu = filp->private_data;
665 void __user *argp = (void __user *)arg;
666
667 switch (ioctl) {
ba5c1e9b
CO
668 case KVM_S390_INTERRUPT: {
669 struct kvm_s390_interrupt s390int;
670
671 if (copy_from_user(&s390int, argp, sizeof(s390int)))
672 return -EFAULT;
673 return kvm_s390_inject_vcpu(vcpu, &s390int);
674 }
b0c632db
HC
675 case KVM_S390_STORE_STATUS:
676 return kvm_s390_vcpu_store_status(vcpu, arg);
677 case KVM_S390_SET_INITIAL_PSW: {
678 psw_t psw;
679
680 if (copy_from_user(&psw, argp, sizeof(psw)))
681 return -EFAULT;
682 return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
683 }
684 case KVM_S390_INITIAL_RESET:
685 return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
686 default:
687 ;
688 }
689 return -EINVAL;
690}
691
692/* Section: memory related */
693int kvm_arch_set_memory_region(struct kvm *kvm,
694 struct kvm_userspace_memory_region *mem,
695 struct kvm_memory_slot old,
696 int user_alloc)
697{
2668dab7 698 int i;
988a2cae 699 struct kvm_vcpu *vcpu;
2668dab7 700
b0c632db
HC
701 /* A few sanity checks. We can have exactly one memory slot which has
702 to start at guest virtual zero and which has to be located at a
703 page boundary in userland and which has to end at a page boundary.
704 The memory in userland is ok to be fragmented into various different
705 vmas. It is okay to mmap() and munmap() stuff in this slot after
706 doing this call at any time */
707
628eb9b8 708 if (mem->slot)
b0c632db
HC
709 return -EINVAL;
710
711 if (mem->guest_phys_addr)
712 return -EINVAL;
713
714 if (mem->userspace_addr & (PAGE_SIZE - 1))
715 return -EINVAL;
716
717 if (mem->memory_size & (PAGE_SIZE - 1))
718 return -EINVAL;
719
2668dab7
CO
720 if (!user_alloc)
721 return -EINVAL;
722
628eb9b8 723 /* request update of sie control block for all available vcpus */
988a2cae
GN
724 kvm_for_each_vcpu(i, vcpu, kvm) {
725 if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
726 continue;
727 kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
2668dab7 728 }
b0c632db
HC
729
730 return 0;
731}
732
34d4cb8f
MT
733void kvm_arch_flush_shadow(struct kvm *kvm)
734{
735}
736
b0c632db
HC
737gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
738{
739 return gfn;
740}
741
742static int __init kvm_s390_init(void)
743{
ef50f7ac
CB
744 int ret;
745 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
746 if (ret)
747 return ret;
748
749 /*
750 * guests can ask for up to 255+1 double words, we need a full page
751 * to hold the maximum amount of facilites. On the other hand, we
752 * only set facilities that are known to work in KVM.
753 */
754 facilities = (unsigned long long *) get_zeroed_page(GFP_DMA);
755 if (!facilities) {
756 kvm_exit();
757 return -ENOMEM;
758 }
759 stfle(facilities, 1);
760 facilities[0] &= 0xff00fff3f0700000ULL;
761 return 0;
b0c632db
HC
762}
763
764static void __exit kvm_s390_exit(void)
765{
ef50f7ac 766 free_page((unsigned long) facilities);
b0c632db
HC
767 kvm_exit();
768}
769
770module_init(kvm_s390_init);
771module_exit(kvm_s390_exit);