KVM: s390: add kvm stat counter for all diagnoses
arch/s390/kvm/kvm-s390.c
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536   /* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
                           (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

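/*
 * Per-vcpu event counters, exported through debugfs under
 * /sys/kernel/debug/kvm and readable e.g. with the kvm_stat tool. This
 * table now carries one counter per handled diagnose code (0x10, 0x44,
 * 0x9c, 0x258, 0x308 and 0x500), so every diagnose call can be observed.
 */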
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "instruction_stctl", VCPU_STAT(instruction_stctl) },
        { "instruction_stctg", VCPU_STAT(instruction_stctg) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_essa", VCPU_STAT(instruction_essa) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
        { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
        { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
        { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
        { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
        { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { "diagnose_258", VCPU_STAT(diagnose_258) },
        { "diagnose_308", VCPU_STAT(diagnose_308) },
        { "diagnose_500", VCPU_STAT(diagnose_500) },
        { NULL }
};

/*
 * Upper facilities limit for kvm. The facility bits are numbered as
 * by STFLE, i.e. bit 0 is the leftmost bit of the first mask word.
 */
unsigned long kvm_s390_fac_list_mask[] = {
        0xffe6fffbfcfdfc40UL,
        0x005e800000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
        BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
        return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

static struct gmap_notifier gmap_notifier;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
        gmap_notifier.notifier_call = kvm_gmap_notifier;
        gmap_register_ipte_notifier(&gmap_notifier);
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
        gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
        /* Register floating interrupt controller interface. */
        return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_ASYNC_PF:
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_ENABLE_CAP_VM:
        case KVM_CAP_S390_IRQCHIP:
        case KVM_CAP_VM_ATTRIBUTES:
        case KVM_CAP_MP_STATE:
        case KVM_CAP_S390_INJECT_IRQ:
        case KVM_CAP_S390_USER_SIGP:
        case KVM_CAP_S390_USER_STSI:
        case KVM_CAP_S390_SKEYS:
        case KVM_CAP_S390_IRQ_STATE:
                r = 1;
                break;
        case KVM_CAP_S390_MEM_OP:
                r = MEM_OP_MAX_SIZE;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
        case KVM_CAP_S390_VECTOR_REGISTERS:
                r = MACHINE_HAS_VX;
                break;
        default:
                r = 0;
        }
        return r;
}

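/*
 * Transfer dirtiness from the gmap (guest address space) to the
 * memslot's dirty bitmap: test and clear the dirty bit of every guest
 * page in the slot and mark the corresponding gfn dirty in KVM's log.
 */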
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
                                        struct kvm_memory_slot *memslot)
{
        gfn_t cur_gfn, last_gfn;
        unsigned long address;
        struct gmap *gmap = kvm->arch.gmap;

        down_read(&gmap->mm->mmap_sem);
        /* Loop over all guest pages; last_gfn is exclusive */
        last_gfn = memslot->base_gfn + memslot->npages;
        for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
                address = gfn_to_hva_memslot(memslot, cur_gfn);

                if (gmap_test_and_clear_dirty(address, gmap))
                        mark_page_dirty(kvm, cur_gfn);
        }
        up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        int r;
        unsigned long n;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        int is_dirty = 0;

        mutex_lock(&kvm->slots_lock);

        r = -EINVAL;
        if (log->slot >= KVM_USER_MEM_SLOTS)
                goto out;

        slots = kvm_memslots(kvm);
        memslot = id_to_memslot(slots, log->slot);
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        kvm_s390_sync_dirty_log(kvm, memslot);
        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* Clear the dirty log */
        if (is_dirty) {
                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }
        r = 0;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_IRQCHIP:
                kvm->arch.use_irqchip = 1;
                r = 0;
                break;
        case KVM_CAP_S390_USER_SIGP:
                kvm->arch.user_sigp = 1;
                r = 0;
                break;
        case KVM_CAP_S390_VECTOR_REGISTERS:
                if (MACHINE_HAS_VX) {
                        set_kvm_facility(kvm->arch.model.fac->mask, 129);
                        set_kvm_facility(kvm->arch.model.fac->list, 129);
                        r = 0;
                } else
                        r = -EINVAL;
                break;
        case KVM_CAP_S390_USER_STSI:
                kvm->arch.user_stsi = 1;
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->attr) {
        case KVM_S390_VM_MEM_LIMIT_SIZE:
                ret = 0;
                if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
                        ret = -EFAULT;
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;
        unsigned int idx;

        switch (attr->attr) {
        case KVM_S390_VM_MEM_ENABLE_CMMA:
                /* enable CMMA only for z10 and later (EDAT_1) */
                ret = -EINVAL;
                if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
                        break;

                ret = -EBUSY;
                mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus) == 0) {
                        kvm->arch.use_cmma = 1;
                        ret = 0;
                }
                mutex_unlock(&kvm->lock);
                break;
        case KVM_S390_VM_MEM_CLR_CMMA:
                ret = -EINVAL;
                if (!kvm->arch.use_cmma)
                        break;

                mutex_lock(&kvm->lock);
                idx = srcu_read_lock(&kvm->srcu);
                s390_reset_cmma(kvm->arch.gmap->mm);
                srcu_read_unlock(&kvm->srcu, idx);
                mutex_unlock(&kvm->lock);
                ret = 0;
                break;
        case KVM_S390_VM_MEM_LIMIT_SIZE: {
                unsigned long new_limit;

                if (kvm_is_ucontrol(kvm))
                        return -EINVAL;

                if (get_user(new_limit, (u64 __user *)attr->addr))
                        return -EFAULT;

                if (new_limit > kvm->arch.gmap->asce_end)
                        return -E2BIG;

                ret = -EBUSY;
                mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus) == 0) {
                        /* gmap_alloc will round the limit up */
                        struct gmap *new = gmap_alloc(current->mm, new_limit);

                        if (!new) {
                                ret = -ENOMEM;
                        } else {
                                gmap_free(kvm->arch.gmap);
                                new->private = kvm;
                                kvm->arch.gmap = new;
                                ret = 0;
                        }
                }
                mutex_unlock(&kvm->lock);
                break;
        }
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_vcpu *vcpu;
        int i;

        if (!test_kvm_facility(kvm, 76))
                return -EINVAL;

        mutex_lock(&kvm->lock);
        switch (attr->attr) {
        case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
                get_random_bytes(
                        kvm->arch.crypto.crycb->aes_wrapping_key_mask,
                        sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
                kvm->arch.crypto.aes_kw = 1;
                break;
        case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
                get_random_bytes(
                        kvm->arch.crypto.crycb->dea_wrapping_key_mask,
                        sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
                kvm->arch.crypto.dea_kw = 1;
                break;
        case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
                kvm->arch.crypto.aes_kw = 0;
                memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
                        sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
                break;
        case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
                kvm->arch.crypto.dea_kw = 0;
                memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
                        sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
                break;
        default:
                mutex_unlock(&kvm->lock);
                return -ENXIO;
        }

        kvm_for_each_vcpu(i, vcpu, kvm) {
                kvm_s390_vcpu_crypto_setup(vcpu);
                exit_sie(vcpu);
        }
        mutex_unlock(&kvm->lock);
        return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u8 gtod_high;

        if (copy_from_user(&gtod_high, (void __user *)attr->addr,
                                           sizeof(gtod_high)))
                return -EFAULT;

        if (gtod_high != 0)
                return -EINVAL;

        return 0;
}

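/*
 * The guest TOD clock is kept as an offset (epoch) relative to the host
 * TOD clock. Setting it recomputes the delta and propagates it into the
 * SIE block of every vcpu while all vcpus are blocked from entering SIE.
 */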
static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_vcpu *cur_vcpu;
        unsigned int vcpu_idx;
        u64 host_tod, gtod;
        int r;

        if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
                return -EFAULT;

        r = store_tod_clock(&host_tod);
        if (r)
                return r;

        mutex_lock(&kvm->lock);
        kvm->arch.epoch = gtod - host_tod;
        kvm_s390_vcpu_block_all(kvm);
        kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
                cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
        kvm_s390_vcpu_unblock_all(kvm);
        mutex_unlock(&kvm->lock);
        return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        if (attr->flags)
                return -EINVAL;

        switch (attr->attr) {
        case KVM_S390_VM_TOD_HIGH:
                ret = kvm_s390_set_tod_high(kvm, attr);
                break;
        case KVM_S390_VM_TOD_LOW:
                ret = kvm_s390_set_tod_low(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u8 gtod_high = 0;

        if (copy_to_user((void __user *)attr->addr, &gtod_high,
                                         sizeof(gtod_high)))
                return -EFAULT;

        return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u64 host_tod, gtod;
        int r;

        r = store_tod_clock(&host_tod);
        if (r)
                return r;

        gtod = host_tod + kvm->arch.epoch;
        if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
                return -EFAULT;

        return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        if (attr->flags)
                return -EINVAL;

        switch (attr->attr) {
        case KVM_S390_VM_TOD_HIGH:
                ret = kvm_s390_get_tod_high(kvm, attr);
                break;
        case KVM_S390_VM_TOD_LOW:
                ret = kvm_s390_get_tod_low(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_processor *proc;
        int ret = 0;

        mutex_lock(&kvm->lock);
        if (atomic_read(&kvm->online_vcpus)) {
                ret = -EBUSY;
                goto out;
        }
        proc = kzalloc(sizeof(*proc), GFP_KERNEL);
        if (!proc) {
                ret = -ENOMEM;
                goto out;
        }
        if (!copy_from_user(proc, (void __user *)attr->addr,
                            sizeof(*proc))) {
                memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
                       sizeof(struct cpuid));
                kvm->arch.model.ibc = proc->ibc;
                memcpy(kvm->arch.model.fac->list, proc->fac_list,
                       S390_ARCH_FAC_LIST_SIZE_BYTE);
        } else
                ret = -EFAULT;
        kfree(proc);
out:
        mutex_unlock(&kvm->lock);
        return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret = -ENXIO;

        switch (attr->attr) {
        case KVM_S390_VM_CPU_PROCESSOR:
                ret = kvm_s390_set_processor(kvm, attr);
                break;
        }
        return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_processor *proc;
        int ret = 0;

        proc = kzalloc(sizeof(*proc), GFP_KERNEL);
        if (!proc) {
                ret = -ENOMEM;
                goto out;
        }
        memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
        proc->ibc = kvm->arch.model.ibc;
        memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
        if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
                ret = -EFAULT;
        kfree(proc);
out:
        return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_machine *mach;
        int ret = 0;

        mach = kzalloc(sizeof(*mach), GFP_KERNEL);
        if (!mach) {
                ret = -ENOMEM;
                goto out;
        }
        get_cpu_id((struct cpuid *) &mach->cpuid);
        mach->ibc = sclp.ibc;
        memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
        memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
        if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
                ret = -EFAULT;
        kfree(mach);
out:
        return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret = -ENXIO;

        switch (attr->attr) {
        case KVM_S390_VM_CPU_PROCESSOR:
                ret = kvm_s390_get_processor(kvm, attr);
                break;
        case KVM_S390_VM_CPU_MACHINE:
                ret = kvm_s390_get_machine(kvm, attr);
                break;
        }
        return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                ret = kvm_s390_set_mem_control(kvm, attr);
                break;
        case KVM_S390_VM_TOD:
                ret = kvm_s390_set_tod(kvm, attr);
                break;
        case KVM_S390_VM_CPU_MODEL:
                ret = kvm_s390_set_cpu_model(kvm, attr);
                break;
        case KVM_S390_VM_CRYPTO:
                ret = kvm_s390_vm_set_crypto(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                ret = kvm_s390_get_mem_control(kvm, attr);
                break;
        case KVM_S390_VM_TOD:
                ret = kvm_s390_get_tod(kvm, attr);
                break;
        case KVM_S390_VM_CPU_MODEL:
                ret = kvm_s390_get_cpu_model(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                switch (attr->attr) {
                case KVM_S390_VM_MEM_ENABLE_CMMA:
                case KVM_S390_VM_MEM_CLR_CMMA:
                case KVM_S390_VM_MEM_LIMIT_SIZE:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        case KVM_S390_VM_TOD:
                switch (attr->attr) {
                case KVM_S390_VM_TOD_LOW:
                case KVM_S390_VM_TOD_HIGH:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        case KVM_S390_VM_CPU_MODEL:
                switch (attr->attr) {
                case KVM_S390_VM_CPU_PROCESSOR:
                case KVM_S390_VM_CPU_MACHINE:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        case KVM_S390_VM_CRYPTO:
                switch (attr->attr) {
                case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
                case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
                case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
                case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
        uint8_t *keys;
        uint64_t hva;
        unsigned long curkey;
        int i, r = 0;

        if (args->flags != 0)
                return -EINVAL;

        /* Is this guest using storage keys? */
        if (!mm_use_skey(current->mm))
                return KVM_S390_GET_SKEYS_NONE;

        /* Enforce sane limit on memory allocation */
        if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
                return -EINVAL;

        keys = kmalloc_array(args->count, sizeof(uint8_t),
                             GFP_KERNEL | __GFP_NOWARN);
        if (!keys)
                keys = vmalloc(sizeof(uint8_t) * args->count);
        if (!keys)
                return -ENOMEM;

        for (i = 0; i < args->count; i++) {
                hva = gfn_to_hva(kvm, args->start_gfn + i);
                if (kvm_is_error_hva(hva)) {
                        r = -EFAULT;
                        goto out;
                }

                curkey = get_guest_storage_key(current->mm, hva);
                if (IS_ERR_VALUE(curkey)) {
                        r = curkey;
                        goto out;
                }
                keys[i] = curkey;
        }

        r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
                         sizeof(uint8_t) * args->count);
        if (r)
                r = -EFAULT;
out:
        kvfree(keys);
        return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
        uint8_t *keys;
        uint64_t hva;
        int i, r = 0;

        if (args->flags != 0)
                return -EINVAL;

        /* Enforce sane limit on memory allocation */
        if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
                return -EINVAL;

        keys = kmalloc_array(args->count, sizeof(uint8_t),
                             GFP_KERNEL | __GFP_NOWARN);
        if (!keys)
                keys = vmalloc(sizeof(uint8_t) * args->count);
        if (!keys)
                return -ENOMEM;

        r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
                           sizeof(uint8_t) * args->count);
        if (r) {
                r = -EFAULT;
                goto out;
        }

        /* Enable storage key handling for the guest */
        r = s390_enable_skey();
        if (r)
                goto out;

        for (i = 0; i < args->count; i++) {
                hva = gfn_to_hva(kvm, args->start_gfn + i);
                if (kvm_is_error_hva(hva)) {
                        r = -EFAULT;
                        goto out;
                }

                /* Lowest order bit is reserved */
                if (keys[i] & 0x01) {
                        r = -EINVAL;
                        goto out;
                }

                r = set_guest_storage_key(current->mm, hva,
                                          (unsigned long)keys[i], 0);
                if (r)
                        goto out;
        }
out:
        kvfree(keys);
        return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        struct kvm_device_attr attr;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        case KVM_ENABLE_CAP: {
                struct kvm_enable_cap cap;

                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vm_ioctl_enable_cap(kvm, &cap);
                break;
        }
        case KVM_CREATE_IRQCHIP: {
                struct kvm_irq_routing_entry routing;

                r = -EINVAL;
                if (kvm->arch.use_irqchip) {
                        /* Set up dummy routing. */
                        memset(&routing, 0, sizeof(routing));
                        kvm_set_irq_routing(kvm, &routing, 0, 0);
                        r = 0;
                }
                break;
        }
        case KVM_SET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_set_attr(kvm, &attr);
                break;
        }
        case KVM_GET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_get_attr(kvm, &attr);
                break;
        }
        case KVM_HAS_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_has_attr(kvm, &attr);
                break;
        }
        case KVM_S390_GET_SKEYS: {
                struct kvm_s390_skeys args;

                r = -EFAULT;
                if (copy_from_user(&args, argp,
                                   sizeof(struct kvm_s390_skeys)))
                        break;
                r = kvm_s390_get_skeys(kvm, &args);
                break;
        }
        case KVM_S390_SET_SKEYS: {
                struct kvm_s390_skeys args;

                r = -EFAULT;
                if (copy_from_user(&args, argp,
                                   sizeof(struct kvm_s390_skeys)))
                        break;
                r = kvm_s390_set_skeys(kvm, &args);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

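/*
 * Query the AP configuration information (PQAP/QCI): the asm loads the
 * function code into GR0 and the 128-byte response buffer address into
 * GR2, executes the hand-coded PQAP opcode 0xb2af, and returns the
 * condition code (nonzero means the query failed).
 */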
static int kvm_s390_query_ap_config(u8 *config)
{
        u32 fcn_code = 0x04000000UL;
        u32 cc = 0;

        memset(config, 0, 128);
        asm volatile(
                "lgr 0,%1\n"
                "lgr 2,%2\n"
                ".long 0xb2af0000\n"            /* PQAP(QCI) */
                "0: ipm %0\n"
                "srl %0,28\n"
                "1:\n"
                EX_TABLE(0b, 1b)
                : "+r" (cc)
                : "r" (fcn_code), "r" (config)
                : "cc", "0", "2", "memory"
        );

        return cc;
}

static int kvm_s390_apxa_installed(void)
{
        u8 config[128];
        int cc;

        if (test_facility(2) && test_facility(12)) {
                cc = kvm_s390_query_ap_config(config);

                if (cc)
                        pr_err("PQAP(QCI) failed with cc=%d", cc);
                else
                        return config[0] & 0x40;
        }

        return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
        kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

        if (kvm_s390_apxa_installed())
                kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
        else
                kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
        get_cpu_id(cpu_id);
        cpu_id->version = 0xff;
}

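/*
 * Set up the guest crypto control block (CRYCB) when the host has
 * facility 76 (message-security-assist extension 3): allocate it from
 * DMA-capable memory, select the CRYCB format and generate the initial
 * AES/DEA wrapping key masks.
 */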
static int kvm_s390_crypto_init(struct kvm *kvm)
{
        if (!test_kvm_facility(kvm, 76))
                return 0;

        kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
                                         GFP_KERNEL | GFP_DMA);
        if (!kvm->arch.crypto.crycb)
                return -ENOMEM;

        kvm_s390_set_crycb_format(kvm);

        /* Enable AES/DEA protected key functions by default */
        kvm->arch.crypto.aes_kw = 1;
        kvm->arch.crypto.dea_kw = 1;
        get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
                         sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
        get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
                         sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

        return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int i, rc;
        char debug_name[16];
        static unsigned long sca_offset;

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;
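
        /*
         * Stagger the SCA within its page: the offset advances by 16
         * bytes per created VM and wraps at 0x7f0, which keeps the
         * block inside the page. Assumption: the staggering is meant
         * to spread the SCAs of different VMs across cache lines.
         */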
        spin_lock(&kvm_lock);
        sca_offset = (sca_offset + 16) & 0x7f0;
        kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
        spin_unlock(&kvm_lock);

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_err;

        /*
         * The architectural maximum amount of facilities is 16 kbit. To store
         * this amount, 2 kbyte of memory is required. Thus we need a full
         * page to hold the guest facility list (arch.model.fac->list) and the
         * facility mask (arch.model.fac->mask). Its address size has to be
         * 31 bits and word aligned.
         */
        kvm->arch.model.fac =
                (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!kvm->arch.model.fac)
                goto out_err;

        /* Populate the facility mask initially. */
        memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
        for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
                if (i < kvm_s390_fac_list_mask_size())
                        kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
                else
                        kvm->arch.model.fac->mask[i] = 0UL;
        }

        /* Populate the facility list initially. */
        memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
               S390_ARCH_FAC_LIST_SIZE_BYTE);

        kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
        kvm->arch.model.ibc = sclp.ibc & 0x0fff;

        if (kvm_s390_crypto_init(kvm) < 0)
                goto out_err;

        spin_lock_init(&kvm->arch.float_int.lock);
        for (i = 0; i < FIRQ_LIST_COUNT; i++)
                INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
        init_waitqueue_head(&kvm->arch.ipte_wq);
        mutex_init(&kvm->arch.ipte_mutex);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
                if (!kvm->arch.gmap)
                        goto out_err;
                kvm->arch.gmap->private = kvm;
                kvm->arch.gmap->pfault_enabled = 0;
        }

        kvm->arch.css_support = 0;
        kvm->arch.use_irqchip = 0;
        kvm->arch.epoch = 0;

        spin_lock_init(&kvm->arch.start_stop_lock);

        return 0;
out_err:
        kfree(kvm->arch.crypto.crycb);
        free_page((unsigned long)kvm->arch.model.fac);
        debug_unregister(kvm->arch.dbf);
        free_page((unsigned long)(kvm->arch.sca));
        return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
        kvm_s390_clear_local_irqs(vcpu);
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        if (vcpu->kvm->arch.use_cmma)
                kvm_s390_vcpu_unsetup_cmma(vcpu);
        free_page((unsigned long)(vcpu->arch.sie_block));

        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)kvm->arch.model.fac);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        kfree(kvm->arch.crypto.crycb);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
        kvm_s390_destroy_adapters(kvm);
        kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
        if (!vcpu->arch.gmap)
                return -ENOMEM;
        vcpu->arch.gmap->private = vcpu->kvm;

        return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS |
                                    KVM_SYNC_ARCH0 |
                                    KVM_SYNC_PFAULT;
        if (test_kvm_facility(vcpu->kvm, 129))
                vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;

        if (kvm_is_ucontrol(vcpu->kvm))
                return __kvm_ucontrol_vcpu_init(vcpu);

        return 0;
}

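/*
 * Context switch register state around SIE: save the host floating
 * point/vector and access registers, load the guest's (vector registers
 * if facility 129 is available, floating point otherwise), enable the
 * guest address space and flag the vcpu as running.
 */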
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        if (test_kvm_facility(vcpu->kvm, 129))
                save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
        else
                save_fp_regs(vcpu->arch.host_fpregs.fprs);
        save_access_regs(vcpu->arch.host_acrs);
        if (test_kvm_facility(vcpu->kvm, 129)) {
                restore_fp_ctl(&vcpu->run->s.regs.fpc);
                restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
        } else {
                restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
                restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        }
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        if (test_kvm_facility(vcpu->kvm, 129)) {
                save_fp_ctl(&vcpu->run->s.regs.fpc);
                save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
        } else {
                save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
                save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        }
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        if (test_kvm_facility(vcpu->kvm, 129))
                restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
        else
                restore_fp_regs(vcpu->arch.host_fpregs.fprs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
        vcpu->arch.sie_block->pp = 0;
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
                kvm_s390_vcpu_stop(vcpu);
        kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        mutex_lock(&vcpu->kvm->lock);
        vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
        mutex_unlock(&vcpu->kvm->lock);
        if (!kvm_is_ucontrol(vcpu->kvm))
                vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
        if (!test_kvm_facility(vcpu->kvm, 76))
                return;

        vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

        if (vcpu->kvm->arch.crypto.aes_kw)
                vcpu->arch.sie_block->ecb3 |= ECB3_AES;
        if (vcpu->kvm->arch.crypto.dea_kw)
                vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

        vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
        free_page(vcpu->arch.sie_block->cbrlo);
        vcpu->arch.sie_block->cbrlo = 0;
}

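/*
 * Allocate the zeroed page backing the SIE cbrlo field and flip the
 * ecb2 control bits for collaborative memory management (CMMA). The
 * magic bits (set 0x80, clear 0x08) are assumed to be the CMMA-enable
 * and PFMF-interpretation controls, respectively.
 */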
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
        vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
        if (!vcpu->arch.sie_block->cbrlo)
                return -ENOMEM;

        vcpu->arch.sie_block->ecb2 |= 0x80;
        vcpu->arch.sie_block->ecb2 &= ~0x08;
        return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

        vcpu->arch.cpu_id = model->cpu_id;
        vcpu->arch.sie_block->ibc = model->ibc;
        vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        int rc = 0;

        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED);

        if (test_kvm_facility(vcpu->kvm, 78))
                atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
        else if (test_kvm_facility(vcpu->kvm, 8))
                atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

        kvm_s390_vcpu_setup_model(vcpu);

        vcpu->arch.sie_block->ecb   = 6;
        if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
                vcpu->arch.sie_block->ecb |= 0x10;

        vcpu->arch.sie_block->ecb2  = 8;
        vcpu->arch.sie_block->eca   = 0xC1002000U;
        if (sclp.has_siif)
                vcpu->arch.sie_block->eca |= 1;
        if (sclp.has_sigpif)
                vcpu->arch.sie_block->eca |= 0x10000000U;
        if (test_kvm_facility(vcpu->kvm, 129)) {
                vcpu->arch.sie_block->eca |= 0x00020000;
                vcpu->arch.sie_block->ecd |= 0x20000000;
        }
        vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

        if (vcpu->kvm->arch.use_cmma) {
                rc = kvm_s390_vcpu_setup_cmma(vcpu);
                if (rc)
                        return rc;
        }
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

        kvm_s390_vcpu_crypto_setup(vcpu);

        return rc;
}

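/*
 * Create a vcpu: allocate the vcpu structure and its SIE control block
 * (a full zeroed page that also carries the interception TDB and the
 * host vector register save area) and, for non-ucontrol guests, wire
 * the vcpu into the VM's system control area (SCA).
 */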
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        struct sie_page *sie_page;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
        if (!sie_page)
                goto out_free_cpu;

        vcpu->arch.sie_block = &sie_page->sie_block;
        vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
        vcpu->arch.host_vregs = &sie_page->vregs;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);
        trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return kvm_s390_vcpu_has_irq(vcpu, 0);
}

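/*
 * Block a vcpu from (re-)entering SIE: PROG_BLOCK_SIE in the program
 * parameter area (prog20) is checked by the low-level SIE entry code,
 * and exit_sie() kicks the vcpu out of SIE and waits until it has left.
 */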
1437 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
1438 {
1439         atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
1440         exit_sie(vcpu);
1441 }
1442
1443 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
1444 {
1445         atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
1446 }
1447
1448 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
1449 {
1450         atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
1451         exit_sie(vcpu);
1452 }
1453
1454 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
1455 {
1456         atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
1457 }
1458
1459 /*
1460  * Kick a guest cpu out of SIE and wait until SIE is not running.
1461  * If the CPU is not running (e.g. waiting as idle) the function will
1462  * return immediately. */
1463 void exit_sie(struct kvm_vcpu *vcpu)
1464 {
1465         atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
1466         while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1467                 cpu_relax();
1468 }
1469
1470 /* Kick a guest cpu out of SIE to process a request synchronously */
1471 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
1472 {
1473         kvm_make_request(req, vcpu);
1474         kvm_s390_vcpu_request(vcpu);
1475 }
1476
1477 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
1478 {
1479         int i;
1480         struct kvm *kvm = gmap->private;
1481         struct kvm_vcpu *vcpu;
1482
1483         kvm_for_each_vcpu(i, vcpu, kvm) {
1484                 /* match against both prefix pages */
1485                 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
1486                         VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
1487                         kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
1488                 }
1489         }
1490 }
1491
1492 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1493 {
1494         /* kvm common code refers to this, but never calls it */
1495         BUG();
1496         return 0;
1497 }
1498
1499 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
1500                                            struct kvm_one_reg *reg)
1501 {
1502         int r = -EINVAL;
1503
1504         switch (reg->id) {
1505         case KVM_REG_S390_TODPR:
1506                 r = put_user(vcpu->arch.sie_block->todpr,
1507                              (u32 __user *)reg->addr);
1508                 break;
1509         case KVM_REG_S390_EPOCHDIFF:
1510                 r = put_user(vcpu->arch.sie_block->epoch,
1511                              (u64 __user *)reg->addr);
1512                 break;
1513         case KVM_REG_S390_CPU_TIMER:
1514                 r = put_user(vcpu->arch.sie_block->cputm,
1515                              (u64 __user *)reg->addr);
1516                 break;
1517         case KVM_REG_S390_CLOCK_COMP:
1518                 r = put_user(vcpu->arch.sie_block->ckc,
1519                              (u64 __user *)reg->addr);
1520                 break;
1521         case KVM_REG_S390_PFTOKEN:
1522                 r = put_user(vcpu->arch.pfault_token,
1523                              (u64 __user *)reg->addr);
1524                 break;
1525         case KVM_REG_S390_PFCOMPARE:
1526                 r = put_user(vcpu->arch.pfault_compare,
1527                              (u64 __user *)reg->addr);
1528                 break;
1529         case KVM_REG_S390_PFSELECT:
1530                 r = put_user(vcpu->arch.pfault_select,
1531                              (u64 __user *)reg->addr);
1532                 break;
1533         case KVM_REG_S390_PP:
1534                 r = put_user(vcpu->arch.sie_block->pp,
1535                              (u64 __user *)reg->addr);
1536                 break;
1537         case KVM_REG_S390_GBEA:
1538                 r = put_user(vcpu->arch.sie_block->gbea,
1539                              (u64 __user *)reg->addr);
1540                 break;
1541         default:
1542                 break;
1543         }
1544
1545         return r;
1546 }
1547
1548 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
1549                                            struct kvm_one_reg *reg)
1550 {
1551         int r = -EINVAL;
1552
1553         switch (reg->id) {
1554         case KVM_REG_S390_TODPR:
1555                 r = get_user(vcpu->arch.sie_block->todpr,
1556                              (u32 __user *)reg->addr);
1557                 break;
1558         case KVM_REG_S390_EPOCHDIFF:
1559                 r = get_user(vcpu->arch.sie_block->epoch,
1560                              (u64 __user *)reg->addr);
1561                 break;
1562         case KVM_REG_S390_CPU_TIMER:
1563                 r = get_user(vcpu->arch.sie_block->cputm,
1564                              (u64 __user *)reg->addr);
1565                 break;
1566         case KVM_REG_S390_CLOCK_COMP:
1567                 r = get_user(vcpu->arch.sie_block->ckc,
1568                              (u64 __user *)reg->addr);
1569                 break;
1570         case KVM_REG_S390_PFTOKEN:
1571                 r = get_user(vcpu->arch.pfault_token,
1572                              (u64 __user *)reg->addr);
1573                 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1574                         kvm_clear_async_pf_completion_queue(vcpu);
1575                 break;
1576         case KVM_REG_S390_PFCOMPARE:
1577                 r = get_user(vcpu->arch.pfault_compare,
1578                              (u64 __user *)reg->addr);
1579                 break;
1580         case KVM_REG_S390_PFSELECT:
1581                 r = get_user(vcpu->arch.pfault_select,
1582                              (u64 __user *)reg->addr);
1583                 break;
1584         case KVM_REG_S390_PP:
1585                 r = get_user(vcpu->arch.sie_block->pp,
1586                              (u64 __user *)reg->addr);
1587                 break;
1588         case KVM_REG_S390_GBEA:
1589                 r = get_user(vcpu->arch.sie_block->gbea,
1590                              (u64 __user *)reg->addr);
1591                 break;
1592         default:
1593                 break;
1594         }
1595
1596         return r;
1597 }
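
/*
 * Illustrative sketch (not part of this file): how userspace could drive
 * the handlers above through KVM_GET_ONE_REG/KVM_SET_ONE_REG. "vcpu_fd"
 * is assumed to be a vcpu file descriptor from KVM_CREATE_VCPU.
 */
#if 0	/* userspace-side example, never compiled here */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_get_todpr(int vcpu_fd, uint32_t *todpr)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_TODPR,
		.addr = (uint64_t)(unsigned long)todpr,
	};

	/* 0 on success, -1 with errno set on failure (e.g. EINVAL) */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
#endif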
1598
1599 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1600 {
1601         kvm_s390_vcpu_initial_reset(vcpu);
1602         return 0;
1603 }
1604
1605 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1606 {
1607         memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
1608         return 0;
1609 }
1610
1611 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1612 {
1613         memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
1614         return 0;
1615 }
1616
1617 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1618                                   struct kvm_sregs *sregs)
1619 {
1620         memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
1621         memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
1622         restore_access_regs(vcpu->run->s.regs.acrs);
1623         return 0;
1624 }
1625
1626 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1627                                   struct kvm_sregs *sregs)
1628 {
1629         memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
1630         memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
1631         return 0;
1632 }
1633
1634 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1635 {
1636         if (test_fp_ctl(fpu->fpc))
1637                 return -EINVAL;
1638         memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
1639         vcpu->arch.guest_fpregs.fpc = fpu->fpc;
1640         restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1641         restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
1642         return 0;
1643 }
1644
1645 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1646 {
1647         memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
1648         fpu->fpc = vcpu->arch.guest_fpregs.fpc;
1649         return 0;
1650 }
1651
1652 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1653 {
1654         int rc = 0;
1655
1656         if (!is_vcpu_stopped(vcpu))
1657                 rc = -EBUSY;
1658         else {
1659                 vcpu->run->psw_mask = psw.mask;
1660                 vcpu->run->psw_addr = psw.addr;
1661         }
1662         return rc;
1663 }
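
/*
 * Illustrative sketch (not part of this file): setting the initial PSW
 * from userspace with KVM_S390_SET_INITIAL_PSW. The vcpu must be stopped,
 * otherwise the handler above returns -EBUSY. "vcpu_fd" is an assumed
 * vcpu file descriptor.
 */
#if 0	/* userspace-side example, never compiled here */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_set_initial_psw(int vcpu_fd, __u64 mask, __u64 addr)
{
	struct kvm_s390_psw psw = {
		.mask = mask,
		.addr = addr,
	};

	return ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
}
#endif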
1664
1665 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1666                                   struct kvm_translation *tr)
1667 {
1668         return -EINVAL; /* not implemented yet */
1669 }
1670
1671 #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
1672                               KVM_GUESTDBG_USE_HW_BP | \
1673                               KVM_GUESTDBG_ENABLE)
1674
1675 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1676                                         struct kvm_guest_debug *dbg)
1677 {
1678         int rc = 0;
1679
1680         vcpu->guest_debug = 0;
1681         kvm_s390_clear_bp_data(vcpu);
1682
1683         if (dbg->control & ~VALID_GUESTDBG_FLAGS)
1684                 return -EINVAL;
1685
1686         if (dbg->control & KVM_GUESTDBG_ENABLE) {
1687                 vcpu->guest_debug = dbg->control;
1688                 /* enforce guest PER */
1689                 atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1690
1691                 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
1692                         rc = kvm_s390_import_bp_data(vcpu, dbg);
1693         } else {
1694                 atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1695                 vcpu->arch.guestdbg.last_bp = 0;
1696         }
1697
1698         if (rc) {
1699                 vcpu->guest_debug = 0;
1700                 kvm_s390_clear_bp_data(vcpu);
1701                 atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1702         }
1703
1704         return rc;
1705 }
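
/*
 * Illustrative sketch (not part of this file): enabling single-stepping
 * from userspace via KVM_SET_GUEST_DEBUG, which lands in the handler
 * above. "vcpu_fd" is an assumed vcpu file descriptor.
 */
#if 0	/* userspace-side example, never compiled here */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_enable_singlestep(int vcpu_fd)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

	/* the handler returns -EINVAL for flags outside VALID_GUESTDBG_FLAGS */
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}
#endif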
1706
1707 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1708                                     struct kvm_mp_state *mp_state)
1709 {
1710         /* CHECK_STOP and LOAD are not supported yet */
1711         return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
1712                                        KVM_MP_STATE_OPERATING;
1713 }
1714
1715 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1716                                     struct kvm_mp_state *mp_state)
1717 {
1718         int rc = 0;
1719
1720         /* user space knows about this interface - let it control the state */
1721         vcpu->kvm->arch.user_cpu_state_ctrl = 1;
1722
1723         switch (mp_state->mp_state) {
1724         case KVM_MP_STATE_STOPPED:
1725                 kvm_s390_vcpu_stop(vcpu);
1726                 break;
1727         case KVM_MP_STATE_OPERATING:
1728                 kvm_s390_vcpu_start(vcpu);
1729                 break;
1730         case KVM_MP_STATE_LOAD:
1731         case KVM_MP_STATE_CHECK_STOP:
1732                 /* fall through - CHECK_STOP and LOAD are not supported yet */
1733         default:
1734                 rc = -ENXIO;
1735         }
1736
1737         return rc;
1738 }
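
/*
 * Illustrative sketch (not part of this file): stopping and restarting a
 * vcpu from userspace with KVM_SET_MP_STATE. Note that the first such
 * call flips the VM to user-controlled cpu state, as noted above.
 */
#if 0	/* userspace-side example, never compiled here */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_stop_vcpu(int vcpu_fd)
{
	struct kvm_mp_state state = { .mp_state = KVM_MP_STATE_STOPPED };

	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &state);
}

static int example_start_vcpu(int vcpu_fd)
{
	struct kvm_mp_state state = { .mp_state = KVM_MP_STATE_OPERATING };

	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &state);
}
#endif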
1739
1740 static bool ibs_enabled(struct kvm_vcpu *vcpu)
1741 {
1742         return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
1743 }
1744
1745 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
1746 {
1747         if (!vcpu->requests)
1748                 return 0;
1749 retry:
1750         kvm_s390_vcpu_request_handled(vcpu);
1751         /*
1752          * We use MMU_RELOAD just to re-arm the ipte notifier for the
1753          * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
1754          * This ensures that the ipte instruction for this request has
1755          * already finished. We might race against a second unmapper that
1756          * wants to set the blocking bit. Let's just retry the request loop.
1757          */
1758         if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
1759                 int rc;
1760                 rc = gmap_ipte_notify(vcpu->arch.gmap,
1761                                       kvm_s390_get_prefix(vcpu),
1762                                       PAGE_SIZE * 2);
1763                 if (rc)
1764                         return rc;
1765                 goto retry;
1766         }
1767
1768         if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1769                 vcpu->arch.sie_block->ihcpu = 0xffff;
1770                 goto retry;
1771         }
1772
1773         if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
1774                 if (!ibs_enabled(vcpu)) {
1775                         trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
1776                         atomic_set_mask(CPUSTAT_IBS,
1777                                         &vcpu->arch.sie_block->cpuflags);
1778                 }
1779                 goto retry;
1780         }
1781
1782         if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
1783                 if (ibs_enabled(vcpu)) {
1784                         trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
1785                         atomic_clear_mask(CPUSTAT_IBS,
1786                                           &vcpu->arch.sie_block->cpuflags);
1787                 }
1788                 goto retry;
1789         }
1790
1791         /* nothing to do, just clear the request */
1792         clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
1793
1794         return 0;
1795 }
1796
1797 /**
1798  * kvm_arch_fault_in_page - fault-in guest page if necessary
1799  * @vcpu: The corresponding virtual cpu
1800  * @gpa: Guest physical address
1801  * @writable: Whether the page should be writable or not
1802  *
1803  * Make sure that a guest page has been faulted-in on the host.
1804  *
1805  * Return: Zero on success, negative error code otherwise.
1806  */
1807 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
1808 {
1809         return gmap_fault(vcpu->arch.gmap, gpa,
1810                           writable ? FAULT_FLAG_WRITE : 0);
1811 }
1812
1813 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
1814                                       unsigned long token)
1815 {
1816         struct kvm_s390_interrupt inti;
1817         struct kvm_s390_irq irq;
1818
1819         if (start_token) {
1820                 irq.u.ext.ext_params2 = token;
1821                 irq.type = KVM_S390_INT_PFAULT_INIT;
1822                 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
1823         } else {
1824                 inti.type = KVM_S390_INT_PFAULT_DONE;
1825                 inti.parm64 = token;
1826                 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
1827         }
1828 }
1829
1830 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
1831                                      struct kvm_async_pf *work)
1832 {
1833         trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
1834         __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
1835 }
1836
1837 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
1838                                  struct kvm_async_pf *work)
1839 {
1840         trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
1841         __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
1842 }
1843
1844 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
1845                                struct kvm_async_pf *work)
1846 {
1847         /* s390 will always inject the page directly */
1848 }
1849
1850 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
1851 {
1852         /*
1853          * s390 will always inject the page directly,
1854          * but we still want check_async_completion to clean up
1855          */
1856         return true;
1857 }
1858
1859 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
1860 {
1861         hva_t hva;
1862         struct kvm_arch_async_pf arch;
1863         int rc;
1864
1865         if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1866                 return 0;
1867         if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
1868             vcpu->arch.pfault_compare)
1869                 return 0;
1870         if (psw_extint_disabled(vcpu))
1871                 return 0;
1872         if (kvm_s390_vcpu_has_irq(vcpu, 0))
1873                 return 0;
1874         if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
1875                 return 0;
1876         if (!vcpu->arch.gmap->pfault_enabled)
1877                 return 0;
1878
1879         hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
1880         hva += current->thread.gmap_addr & ~PAGE_MASK;
1881         if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
1882                 return 0;
1883
1884         rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
1885         return rc;
1886 }
1887
1888 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1889 {
1890         int rc, cpuflags;
1891
1892         /*
1893          * On s390, notifications for arriving pages will be delivered
1894          * directly to the guest, but the housekeeping for completed
1895          * pfaults is handled outside the worker.
1896          */
1897         kvm_check_async_pf_completion(vcpu);
1898
1899         memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
1900
1901         if (need_resched())
1902                 schedule();
1903
1904         if (test_cpu_flag(CIF_MCCK_PENDING))
1905                 s390_handle_mcck();
1906
1907         if (!kvm_is_ucontrol(vcpu->kvm)) {
1908                 rc = kvm_s390_deliver_pending_interrupts(vcpu);
1909                 if (rc)
1910                         return rc;
1911         }
1912
1913         rc = kvm_s390_handle_requests(vcpu);
1914         if (rc)
1915                 return rc;
1916
1917         if (guestdbg_enabled(vcpu)) {
1918                 kvm_s390_backup_guest_per_regs(vcpu);
1919                 kvm_s390_patch_guest_per_regs(vcpu);
1920         }
1921
1922         vcpu->arch.sie_block->icptcode = 0;
1923         cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
1924         VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
1925         trace_kvm_s390_sie_enter(vcpu, cpuflags);
1926
1927         return 0;
1928 }
1929
1930 static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
1931 {
1932         psw_t *psw = &vcpu->arch.sie_block->gpsw;
1933         u8 opcode;
1934         int rc;
1935
1936         VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1937         trace_kvm_s390_sie_fault(vcpu);
1938
1939         /*
1940          * We want to inject an addressing exception, which is defined as a
1941          * suppressing or terminating exception. However, since we came here
1942          * by a DAT access exception, the PSW still points to the faulting
1943          * instruction since DAT exceptions are nullifying. So we've got
1944          * to look up the current opcode to get the length of the instruction
1945          * to be able to forward the PSW.
1946          */
1947         rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
1948         if (rc)
1949                 return kvm_s390_inject_prog_cond(vcpu, rc);
1950         psw->addr = __rewind_psw(*psw, -insn_length(opcode));
1951
1952         return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1953 }
1954
1955 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
1956 {
1957         int rc = -1;
1958
1959         VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
1960                    vcpu->arch.sie_block->icptcode);
1961         trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
1962
1963         if (guestdbg_enabled(vcpu))
1964                 kvm_s390_restore_guest_per_regs(vcpu);
1965
1966         if (exit_reason >= 0) {
1967                 rc = 0;
1968         } else if (kvm_is_ucontrol(vcpu->kvm)) {
1969                 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1970                 vcpu->run->s390_ucontrol.trans_exc_code =
1971                                                 current->thread.gmap_addr;
1972                 vcpu->run->s390_ucontrol.pgm_code = 0x10;
1973                 rc = -EREMOTE;
1974
1975         } else if (current->thread.gmap_pfault) {
1976                 trace_kvm_s390_major_guest_pfault(vcpu);
1977                 current->thread.gmap_pfault = 0;
1978                 if (kvm_arch_setup_async_pf(vcpu)) {
1979                         rc = 0;
1980                 } else {
1981                         gpa_t gpa = current->thread.gmap_addr;
1982                         rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1983                 }
1984         }
1985
1986         if (rc == -1)
1987                 rc = vcpu_post_run_fault_in_sie(vcpu);
1988
1989         memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
1990
1991         if (rc == 0) {
1992                 if (kvm_is_ucontrol(vcpu->kvm))
1993                         /* Don't exit for host interrupts. */
1994                         rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
1995                 else
1996                         rc = kvm_handle_sie_intercept(vcpu);
1997         }
1998
1999         return rc;
2000 }
2001
2002 static int __vcpu_run(struct kvm_vcpu *vcpu)
2003 {
2004         int rc, exit_reason;
2005
2006         /*
2007          * We try to hold kvm->srcu during most of vcpu_run (except when
2008          * running the guest), so that memslots and other data are protected.
2009          */
2010         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2011
2012         do {
2013                 rc = vcpu_pre_run(vcpu);
2014                 if (rc)
2015                         break;
2016
2017                 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2018                 /*
2019                  * As PF_VCPU will be used in the fault handler, there must
2020                  * be no uaccess between guest_enter and guest_exit.
2021                  */
2022                 local_irq_disable();
2023                 __kvm_guest_enter();
2024                 local_irq_enable();
2025                 exit_reason = sie64a(vcpu->arch.sie_block,
2026                                      vcpu->run->s.regs.gprs);
2027                 local_irq_disable();
2028                 __kvm_guest_exit();
2029                 local_irq_enable();
2030                 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2031
2032                 rc = vcpu_post_run(vcpu, exit_reason);
2033         } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
2034
2035         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2036         return rc;
2037 }
2038
2039 static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2040 {
2041         vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2042         vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2043         if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2044                 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2045         if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2046                 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
2047                 /* some control register changes require a tlb flush */
2048                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2049         }
2050         if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
2051                 vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
2052                 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2053                 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2054                 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2055                 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2056         }
2057         if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2058                 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2059                 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2060                 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
2061                 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2062                         kvm_clear_async_pf_completion_queue(vcpu);
2063         }
2064         kvm_run->kvm_dirty_regs = 0;
2065 }
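
/*
 * Illustrative sketch (not part of this file): how userspace (e.g. during
 * migration) could restore the pfault state through the sync_regs
 * interface consumed above. "run" is assumed to be the mmap()ed
 * struct kvm_run of the vcpu.
 */
#if 0	/* userspace-side example, never compiled here */
#include <linux/kvm.h>

static void example_restore_pfault(struct kvm_run *run,
				   __u64 token, __u64 select, __u64 compare)
{
	run->s.regs.pft = token;
	run->s.regs.pfs = select;
	run->s.regs.pfc = compare;
	/* picked up by sync_regs() on the next KVM_RUN */
	run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
}
#endif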
2066
2067 static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2068 {
2069         kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2070         kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2071         kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2072         memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
2073         kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
2074         kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2075         kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2076         kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2077         kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2078         kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2079         kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2080         kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2081 }
2082
2083 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2084 {
2085         int rc;
2086         sigset_t sigsaved;
2087
2088         if (guestdbg_exit_pending(vcpu)) {
2089                 kvm_s390_prepare_debug_exit(vcpu);
2090                 return 0;
2091         }
2092
2093         if (vcpu->sigset_active)
2094                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2095
2096         if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2097                 kvm_s390_vcpu_start(vcpu);
2098         } else if (is_vcpu_stopped(vcpu)) {
2099                 pr_err_ratelimited("can't run stopped vcpu %d\n",
2100                                    vcpu->vcpu_id);
2101                 return -EINVAL;
2102         }
2103
2104         sync_regs(vcpu, kvm_run);
2105
2106         might_fault();
2107         rc = __vcpu_run(vcpu);
2108
2109         if (signal_pending(current) && !rc) {
2110                 kvm_run->exit_reason = KVM_EXIT_INTR;
2111                 rc = -EINTR;
2112         }
2113
2114         if (guestdbg_exit_pending(vcpu) && !rc)  {
2115                 kvm_s390_prepare_debug_exit(vcpu);
2116                 rc = 0;
2117         }
2118
2119         if (rc == -EOPNOTSUPP) {
2120                 /* intercept cannot be handled in-kernel, prepare kvm-run */
2121                 kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
2122                 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
2123                 kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
2124                 kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
2125                 rc = 0;
2126         }
2127
2128         if (rc == -EREMOTE) {
2129                 /* intercept was handled, but userspace support is needed;
2130                  * kvm_run has been prepared by the handler */
2131                 rc = 0;
2132         }
2133
2134         store_regs(vcpu, kvm_run);
2135
2136         if (vcpu->sigset_active)
2137                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2138
2139         vcpu->stat.exit_userspace++;
2140         return rc;
2141 }
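
/*
 * Illustrative sketch (not part of this file): the canonical userspace
 * run loop that ends up in kvm_arch_vcpu_ioctl_run() above. "vcpu_fd" is
 * an assumed vcpu file descriptor; "run" is its mmap()ed struct kvm_run
 * (size obtained via KVM_GET_VCPU_MMAP_SIZE on the /dev/kvm fd).
 */
#if 0	/* userspace-side example, never compiled here */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_run_loop(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return -1;

		switch (run->exit_reason) {
		case KVM_EXIT_S390_SIEIC:
			/* decode run->s390_sieic.icptcode/ipa/ipb here */
			break;
		case KVM_EXIT_INTR:
			/* interrupted by a signal, just re-enter */
			break;
		default:
			return 0;	/* hand other exits to the VMM */
		}
	}
}
#endif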
2142
2143 /*
2144  * store status at address
2145  * we have two special cases:
2146  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2147  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2148  */
2149 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
2150 {
2151         unsigned char archmode = 1;
2152         unsigned int px;
2153         u64 clkcomp;
2154         int rc;
2155
2156         if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2157                 if (write_guest_abs(vcpu, 163, &archmode, 1))
2158                         return -EFAULT;
2159                 gpa = SAVE_AREA_BASE;
2160         } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2161                 if (write_guest_real(vcpu, 163, &archmode, 1))
2162                         return -EFAULT;
2163                 gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
2164         }
2165         rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
2166                              vcpu->arch.guest_fpregs.fprs, 128);
2167         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
2168                               vcpu->run->s.regs.gprs, 128);
2169         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
2170                               &vcpu->arch.sie_block->gpsw, 16);
2171         px = kvm_s390_get_prefix(vcpu);
2172         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
2173                               &px, 4);
2174         rc |= write_guest_abs(vcpu,
2175                               gpa + offsetof(struct save_area, fp_ctrl_reg),
2176                               &vcpu->arch.guest_fpregs.fpc, 4);
2177         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
2178                               &vcpu->arch.sie_block->todpr, 4);
2179         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
2180                               &vcpu->arch.sie_block->cputm, 8);
2181         clkcomp = vcpu->arch.sie_block->ckc >> 8;
2182         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
2183                               &clkcomp, 8);
2184         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
2185                               &vcpu->run->s.regs.acrs, 64);
2186         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
2187                               &vcpu->arch.sie_block->gcr, 128);
2188         return rc ? -EFAULT : 0;
2189 }
2190
2191 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2192 {
2193         /*
2194          * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2195          * copying in vcpu load/put. Let's update our copies before we save
2196          * them into the save area.
2197          */
2198         save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
2199         save_fp_regs(vcpu->arch.guest_fpregs.fprs);
2200         save_access_regs(vcpu->run->s.regs.acrs);
2201
2202         return kvm_s390_store_status_unloaded(vcpu, addr);
2203 }
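
/*
 * Illustrative sketch (not part of this file): triggering the store
 * operation above from userspace. The special values NOADDR (-1ul) and
 * PREFIXED (-2ul) follow the comment before
 * kvm_s390_store_status_unloaded(); the constants themselves are
 * kernel-internal, so userspace defines its own.
 */
#if 0	/* userspace-side example, never compiled here */
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define EXAMPLE_STORE_STATUS_NOADDR	((unsigned long)-1)

static int example_store_status(int vcpu_fd)
{
	/* store the cpu state to absolute 0x1200 (the 64 bit save area) */
	return ioctl(vcpu_fd, KVM_S390_STORE_STATUS,
		     EXAMPLE_STORE_STATUS_NOADDR);
}
#endif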
2204
2205 /*
2206  * store additional status at address
2207  */
2208 int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2209                                         unsigned long gpa)
2210 {
2211         /* Only bits 0-53 are used for address formation */
2212         if (!(gpa & ~0x3ff))
2213                 return 0;
2214
2215         return write_guest_abs(vcpu, gpa & ~0x3ff,
2216                                (void *)&vcpu->run->s.regs.vrs, 512);
2217 }
2218
2219 int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2220 {
2221         if (!test_kvm_facility(vcpu->kvm, 129))
2222                 return 0;
2223
2224         /*
2225          * The guest VXRS are in the host VXRS due to the lazy
2226          * copying in vcpu load/put. Let's update our copies before we save
2227          * them into the save area.
2228          */
2229         save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
2230
2231         return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2232 }
2233
2234 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2235 {
2236         kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
2237         kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
2238 }
2239
2240 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2241 {
2242         unsigned int i;
2243         struct kvm_vcpu *vcpu;
2244
2245         kvm_for_each_vcpu(i, vcpu, kvm) {
2246                 __disable_ibs_on_vcpu(vcpu);
2247         }
2248 }
2249
2250 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2251 {
2252         kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
2253         kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
2254 }
2255
2256 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2257 {
2258         int i, online_vcpus, started_vcpus = 0;
2259
2260         if (!is_vcpu_stopped(vcpu))
2261                 return;
2262
2263         trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
2264         /* Only one cpu at a time may enter/leave the STOPPED state. */
2265         spin_lock(&vcpu->kvm->arch.start_stop_lock);
2266         online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2267
2268         for (i = 0; i < online_vcpus; i++) {
2269                 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2270                         started_vcpus++;
2271         }
2272
2273         if (started_vcpus == 0) {
2274                 /* we're the only active VCPU -> speed it up */
2275                 __enable_ibs_on_vcpu(vcpu);
2276         } else if (started_vcpus == 1) {
2277                 /*
2278                  * As we are starting a second VCPU, we have to disable
2279                  * the IBS facility on all VCPUs to remove potentially
2280                  * outstanding ENABLE requests.
2281                  */
2282                 __disable_ibs_on_all_vcpus(vcpu->kvm);
2283         }
2284
2285         atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
2286         /*
2287          * Another VCPU might have used IBS while we were offline.
2288          * Let's play safe and flush the VCPU at startup.
2289          */
2290         kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2291         spin_unlock(&vcpu->kvm->arch.start_stop_lock);
2292         return;
2293 }
2294
2295 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2296 {
2297         int i, online_vcpus, started_vcpus = 0;
2298         struct kvm_vcpu *started_vcpu = NULL;
2299
2300         if (is_vcpu_stopped(vcpu))
2301                 return;
2302
2303         trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
2304         /* Only one cpu at a time may enter/leave the STOPPED state. */
2305         spin_lock(&vcpu->kvm->arch.start_stop_lock);
2306         online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2307
2308         /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
2309         kvm_s390_clear_stop_irq(vcpu);
2310
2311         atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
2312         __disable_ibs_on_vcpu(vcpu);
2313
2314         for (i = 0; i < online_vcpus; i++) {
2315                 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2316                         started_vcpus++;
2317                         started_vcpu = vcpu->kvm->vcpus[i];
2318                 }
2319         }
2320
2321         if (started_vcpus == 1) {
2322                 /*
2323                  * As we only have one VCPU left, we want to enable the
2324                  * IBS facility for that VCPU to speed it up.
2325                  */
2326                 __enable_ibs_on_vcpu(started_vcpu);
2327         }
2328
2329         spin_unlock(&vcpu->kvm->arch.start_stop_lock);
2330         return;
2331 }
2332
2333 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2334                                      struct kvm_enable_cap *cap)
2335 {
2336         int r;
2337
2338         if (cap->flags)
2339                 return -EINVAL;
2340
2341         switch (cap->cap) {
2342         case KVM_CAP_S390_CSS_SUPPORT:
2343                 if (!vcpu->kvm->arch.css_support) {
2344                         vcpu->kvm->arch.css_support = 1;
2345                         trace_kvm_s390_enable_css(vcpu->kvm);
2346                 }
2347                 r = 0;
2348                 break;
2349         default:
2350                 r = -EINVAL;
2351                 break;
2352         }
2353         return r;
2354 }
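
/*
 * Illustrative sketch (not part of this file): enabling the css support
 * capability handled above from userspace. "vcpu_fd" is an assumed vcpu
 * file descriptor.
 */
#if 0	/* userspace-side example, never compiled here */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_enable_css(int vcpu_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_S390_CSS_SUPPORT;	/* cap->flags must stay 0 */

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}
#endif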
2355
2356 static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2357                                   struct kvm_s390_mem_op *mop)
2358 {
2359         void __user *uaddr = (void __user *)mop->buf;
2360         void *tmpbuf = NULL;
2361         int r, srcu_idx;
2362         const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2363                                     | KVM_S390_MEMOP_F_CHECK_ONLY;
2364
2365         if (mop->flags & ~supported_flags)
2366                 return -EINVAL;
2367
2368         if (mop->size > MEM_OP_MAX_SIZE)
2369                 return -E2BIG;
2370
2371         if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2372                 tmpbuf = vmalloc(mop->size);
2373                 if (!tmpbuf)
2374                         return -ENOMEM;
2375         }
2376
2377         srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2378
2379         switch (mop->op) {
2380         case KVM_S390_MEMOP_LOGICAL_READ:
2381                 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2382                         r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
2383                         break;
2384                 }
2385                 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2386                 if (r == 0) {
2387                         if (copy_to_user(uaddr, tmpbuf, mop->size))
2388                                 r = -EFAULT;
2389                 }
2390                 break;
2391         case KVM_S390_MEMOP_LOGICAL_WRITE:
2392                 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2393                         r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
2394                         break;
2395                 }
2396                 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2397                         r = -EFAULT;
2398                         break;
2399                 }
2400                 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2401                 break;
2402         default:
2403                 r = -EINVAL;
2404         }
2405
2406         srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2407
2408         if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2409                 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2410
2411         vfree(tmpbuf);
2412         return r;
2413 }
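
/*
 * Illustrative sketch (not part of this file): reading guest memory via
 * the KVM_S390_MEM_OP path above, including the check-only probe that
 * skips the bounce buffer. "vcpu_fd" is an assumed vcpu file descriptor.
 */
#if 0	/* userspace-side example, never compiled here */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_read_guest(int vcpu_fd, __u64 gaddr, void *buf,
			      __u32 size, int check_only)
{
	struct kvm_s390_mem_op op;

	memset(&op, 0, sizeof(op));
	op.gaddr = gaddr;
	op.size  = size;
	op.op    = KVM_S390_MEMOP_LOGICAL_READ;
	op.buf   = (__u64)(unsigned long)buf;
	op.ar    = 0;				/* access register 0 */
	if (check_only)
		op.flags = KVM_S390_MEMOP_F_CHECK_ONLY;

	/* a positive return value is a program condition for the caller */
	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}
#endif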
2414
2415 long kvm_arch_vcpu_ioctl(struct file *filp,
2416                          unsigned int ioctl, unsigned long arg)
2417 {
2418         struct kvm_vcpu *vcpu = filp->private_data;
2419         void __user *argp = (void __user *)arg;
2420         int idx;
2421         long r;
2422
2423         switch (ioctl) {
2424         case KVM_S390_IRQ: {
2425                 struct kvm_s390_irq s390irq;
2426
2427                 r = -EFAULT;
2428                 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
2429                         break;
2430                 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
2431                 break;
2432         }
2433         case KVM_S390_INTERRUPT: {
2434                 struct kvm_s390_interrupt s390int;
2435                 struct kvm_s390_irq s390irq;
2436
2437                 r = -EFAULT;
2438                 if (copy_from_user(&s390int, argp, sizeof(s390int)))
2439                         break;
2440                 if (s390int_to_s390irq(&s390int, &s390irq))
2441                         return -EINVAL;
2442                 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
2443                 break;
2444         }
2445         case KVM_S390_STORE_STATUS:
2446                 idx = srcu_read_lock(&vcpu->kvm->srcu);
2447                 r = kvm_s390_vcpu_store_status(vcpu, arg);
2448                 srcu_read_unlock(&vcpu->kvm->srcu, idx);
2449                 break;
2450         case KVM_S390_SET_INITIAL_PSW: {
2451                 psw_t psw;
2452
2453                 r = -EFAULT;
2454                 if (copy_from_user(&psw, argp, sizeof(psw)))
2455                         break;
2456                 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2457                 break;
2458         }
2459         case KVM_S390_INITIAL_RESET:
2460                 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2461                 break;
2462         case KVM_SET_ONE_REG:
2463         case KVM_GET_ONE_REG: {
2464                 struct kvm_one_reg reg;
2465                 r = -EFAULT;
2466                 if (copy_from_user(&reg, argp, sizeof(reg)))
2467                         break;
2468                 if (ioctl == KVM_SET_ONE_REG)
2469                         r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
2470                 else
2471                         r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
2472                 break;
2473         }
2474 #ifdef CONFIG_KVM_S390_UCONTROL
2475         case KVM_S390_UCAS_MAP: {
2476                 struct kvm_s390_ucas_mapping ucasmap;
2477
2478                 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2479                         r = -EFAULT;
2480                         break;
2481                 }
2482
2483                 if (!kvm_is_ucontrol(vcpu->kvm)) {
2484                         r = -EINVAL;
2485                         break;
2486                 }
2487
2488                 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
2489                                      ucasmap.vcpu_addr, ucasmap.length);
2490                 break;
2491         }
2492         case KVM_S390_UCAS_UNMAP: {
2493                 struct kvm_s390_ucas_mapping ucasmap;
2494
2495                 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2496                         r = -EFAULT;
2497                         break;
2498                 }
2499
2500                 if (!kvm_is_ucontrol(vcpu->kvm)) {
2501                         r = -EINVAL;
2502                         break;
2503                 }
2504
2505                 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
2506                         ucasmap.length);
2507                 break;
2508         }
2509 #endif
2510         case KVM_S390_VCPU_FAULT: {
2511                 r = gmap_fault(vcpu->arch.gmap, arg, 0);
2512                 break;
2513         }
2514         case KVM_ENABLE_CAP:
2515         {
2516                 struct kvm_enable_cap cap;
2517                 r = -EFAULT;
2518                 if (copy_from_user(&cap, argp, sizeof(cap)))
2519                         break;
2520                 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2521                 break;
2522         }
2523         case KVM_S390_MEM_OP: {
2524                 struct kvm_s390_mem_op mem_op;
2525
2526                 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
2527                         r = kvm_s390_guest_mem_op(vcpu, &mem_op);
2528                 else
2529                         r = -EFAULT;
2530                 break;
2531         }
2532         case KVM_S390_SET_IRQ_STATE: {
2533                 struct kvm_s390_irq_state irq_state;
2534
2535                 r = -EFAULT;
2536                 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2537                         break;
2538                 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
2539                     irq_state.len == 0 ||
2540                     irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
2541                         r = -EINVAL;
2542                         break;
2543                 }
2544                 r = kvm_s390_set_irq_state(vcpu,
2545                                            (void __user *) irq_state.buf,
2546                                            irq_state.len);
2547                 break;
2548         }
2549         case KVM_S390_GET_IRQ_STATE: {
2550                 struct kvm_s390_irq_state irq_state;
2551
2552                 r = -EFAULT;
2553                 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2554                         break;
2555                 if (irq_state.len == 0) {
2556                         r = -EINVAL;
2557                         break;
2558                 }
2559                 r = kvm_s390_get_irq_state(vcpu,
2560                                            (__u8 __user *)  irq_state.buf,
2561                                            irq_state.len);
2562                 break;
2563         }
2564         default:
2565                 r = -ENOTTY;
2566         }
2567         return r;
2568 }
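
/*
 * Illustrative sketch (not part of this file): saving the irq state of a
 * vcpu with KVM_S390_GET_IRQ_STATE, as dispatched above. The buffer
 * should be sized for the worst case, analogous to VCPU_IRQS_MAX_BUF; a
 * non-negative return value is the number of bytes stored.
 */
#if 0	/* userspace-side example, never compiled here */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_save_irq_state(int vcpu_fd, void *buf, __u32 len)
{
	struct kvm_s390_irq_state irq_state = {
		.buf = (__u64)(unsigned long)buf,
		.len = len,
	};

	return ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &irq_state);
}
#endif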
2569
2570 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2571 {
2572 #ifdef CONFIG_KVM_S390_UCONTROL
2573         if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2574                  && (kvm_is_ucontrol(vcpu->kvm))) {
2575                 vmf->page = virt_to_page(vcpu->arch.sie_block);
2576                 get_page(vmf->page);
2577                 return 0;
2578         }
2579 #endif
2580         return VM_FAULT_SIGBUS;
2581 }
2582
2583 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2584                             unsigned long npages)
2585 {
2586         return 0;
2587 }
2588
2589 /* Section: memory related */
2590 int kvm_arch_prepare_memory_region(struct kvm *kvm,
2591                                    struct kvm_memory_slot *memslot,
2592                                    const struct kvm_userspace_memory_region *mem,
2593                                    enum kvm_mr_change change)
2594 {
2595         /* A few sanity checks. Memory slots have to start and end at a
2596            segment boundary (1MB). The memory in userland may be fragmented
2597            into various different vmas. It is okay to mmap() and munmap()
2598            parts of this slot at any time after this call. */
2599
2600         if (mem->userspace_addr & 0xffffful)
2601                 return -EINVAL;
2602
2603         if (mem->memory_size & 0xffffful)
2604                 return -EINVAL;
2605
2606         return 0;
2607 }
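
/*
 * Illustrative sketch (not part of this file): a KVM_SET_USER_MEMORY_REGION
 * call that satisfies the alignment checks above. "vm_fd" is an assumed
 * VM file descriptor; "hva" and "size" must be 1MB aligned per the checks
 * on userspace_addr and memory_size.
 */
#if 0	/* userspace-side example, never compiled here */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_add_slot(int vm_fd, void *hva, __u64 gpa, __u64 size)
{
	struct kvm_userspace_memory_region mem = {
		.slot            = 0,
		.guest_phys_addr = gpa,
		.memory_size     = size,	/* multiple of 1MB */
		.userspace_addr  = (__u64)(unsigned long)hva, /* 1MB aligned */
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
}
#endif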
2608
2609 void kvm_arch_commit_memory_region(struct kvm *kvm,
2610                                 const struct kvm_userspace_memory_region *mem,
2611                                 const struct kvm_memory_slot *old,
2612                                 const struct kvm_memory_slot *new,
2613                                 enum kvm_mr_change change)
2614 {
2615         int rc;
2616
2617         /* If the basics of the memslot do not change, we do not want
2618          * to update the gmap. Every update causes several unnecessary
2619          * segment translation exceptions. This is usually handled just
2620          * fine by the normal fault handler + gmap, but it will also
2621          * cause faults on the prefix page of running guest CPUs.
2622          */
2623         if (old->userspace_addr == mem->userspace_addr &&
2624             old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
2625             old->npages * PAGE_SIZE == mem->memory_size)
2626                 return;
2627
2628         rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2629                 mem->guest_phys_addr, mem->memory_size);
2630         if (rc)
2631                 pr_warn("failed to commit memory region\n");
2632         return;
2633 }
2634
2635 static int __init kvm_s390_init(void)
2636 {
2637         return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
2638 }
2639
2640 static void __exit kvm_s390_exit(void)
2641 {
2642         kvm_exit();
2643 }
2644
2645 module_init(kvm_s390_init);
2646 module_exit(kvm_s390_exit);
2647
2648 /*
2649  * Enable autoloading of the kvm module.
2650  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2651  * since x86 takes a different approach.
2652  */
2653 #include <linux/miscdevice.h>
2654 MODULE_ALIAS_MISCDEV(KVM_MINOR);
2655 MODULE_ALIAS("devname:kvm");