/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

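/*
 * Map a counter name to its offset inside struct kvm_vcpu for the
 * generic debugfs code; all counters below are per-vcpu statistics.
 */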
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IOEVENTFD:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
        default:
                r = 0;
        }
        return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int rc;
        char debug_name[16];

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

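        /*
         * The SCA (system control area) holds the SIE block designations
         * and the mcn vcpu bitmap for this VM; one zeroed page is enough.
         */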
        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
        }

        kvm->arch.css_support = 0;

        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = gmap_alloc(current->mm);
                if (!vcpu->arch.gmap)
                        return -ENOMEM;
                return 0;
        }

        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
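        /* tell userspace which register sets are synced via the kvm_run area */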
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS;
        return 0;
}

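/*
 * The FPU and access registers are switched lazily: while the vcpu is
 * loaded, the guest values live in the real registers and the host
 * values are parked in the vcpu struct (and vice versa on put).
 */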
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
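        /* architected initial-reset values for CR0 and CR14 (see the PoP) */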
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
        atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED);
        vcpu->arch.sie_block->ecb   = 6;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        vcpu->arch.sie_block->fac   = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
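                /* the SCA origin is split into a high and a low half in the SIE block */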
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);
        trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kfree(vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = put_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = put_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = put_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = put_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = get_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = get_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = get_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = get_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->run->s.regs.acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc;

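        /* the SIE block keeps its own copy of guest gprs 14 and 15 in gg14 */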
        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm))
                kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        trace_kvm_s390_sie_enter(vcpu,
                                 atomic_read(&vcpu->arch.sie_block->cpuflags));

        /*
         * Since PF_VCPU is used in the fault handler, there must be no
         * uaccess between guest_enter and guest_exit.
         */
        preempt_disable();
        kvm_guest_enter();
        preempt_enable();
        rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
        kvm_guest_exit();

        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

        if (rc) {
                if (kvm_is_ucontrol(vcpu->kvm)) {
                        rc = SIE_INTERCEPT_UCONTROL;
                } else {
                        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                        trace_kvm_s390_sie_fault(vcpu);
                        rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }
        }

        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
        return rc;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
        case KVM_EXIT_S390_UCONTROL:
        case KVM_EXIT_S390_TSCH:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
                memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }

        might_fault();

        do {
                rc = __vcpu_run(vcpu);
                if (rc)
                        break;
                if (kvm_is_ucontrol(vcpu->kvm))
                        rc = -EOPNOTSUPP;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

#ifdef CONFIG_KVM_S390_UCONTROL
        if (rc == SIE_INTERCEPT_UCONTROL) {
                kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
                kvm_run->s390_ucontrol.trans_exc_code =
                        current->thread.gmap_addr;
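                /* 0x10 is the segment-translation exception program interruption code */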
                kvm_run->s390_ucontrol.pgm_code = 0x10;
                rc = 0;
        }
#endif

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed;
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

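/*
 * Copy to guest storage, either through the prefixed (real) address
 * space or through absolute addressing, as requested by the caller.
 */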
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;

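        /* the architectural-mode id (archmode) is stored at location 163 */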
        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        /*
         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
         * copying in vcpu load/put. Let's update our copies before we save
         * them into the save area.
         */
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->run->s.regs.acrs);

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->run->s.regs.gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->run->s.regs.acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_CSS_SUPPORT:
                if (!vcpu->kvm->arch.css_support) {
                        vcpu->kvm->arch.css_support = 1;
                        trace_kvm_s390_enable_css(vcpu->kvm);
                }
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
                                     ucasmap.vcpu_addr, ucasmap.length);
                break;
        }
        case KVM_S390_UCAS_UNMAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
                        ucasmap.length);
                break;
        }
#endif
        case KVM_S390_VCPU_FAULT: {
                r = gmap_fault(arg, vcpu->arch.gmap);
                if (!IS_ERR_VALUE(r))
                        r = 0;
                break;
        }
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        default:
                r = -ENOTTY;
        }
        return r;
}

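/*
 * For user controlled VMs, userspace can mmap() the hardware SIE control
 * block of a vcpu at the well-known KVM_S390_SIE_PAGE_OFFSET.
 */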
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
                 && (kvm_is_ucontrol(vcpu->kvm))) {
                vmf->page = virt_to_page(vcpu->arch.sie_block);
                get_page(vmf->page);
                return 0;
        }
#endif
        return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        /* A few sanity checks. Memory slots have to start and end on a
           segment boundary (1MB). The userland memory may be fragmented
           across different vmas, and it is fine to mmap() and munmap()
           within this slot at any time after this call. */

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old,
                                enum kvm_mr_change change)
{
        int rc;

        /* If the basics of the memslot do not change, we do not want
         * to update the gmap. Every update causes several unnecessary
         * segment translation exceptions. This is usually handled just
         * fine by the normal fault handler + gmap, but it will also
         * cause faults on the prefix page of running guest CPUs.
         */
        if (old->userspace_addr == mem->userspace_addr &&
            old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
            old->npages * PAGE_SIZE == mem->memory_size)
                return;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}

static int __init kvm_s390_init(void)
{
        int ret;
        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
        facilities[0] &= 0xff00fff3f47c0000ULL;
        facilities[1] &= 0x001c000000000000ULL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);