firefly-linux-kernel-4.4.55.git: arch/arm/kvm/psci.c
/*
 * Copyright (C) 2012 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <linux/wait.h>

#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_psci.h>

/*
 * This is an implementation of the Power State Coordination Interface
 * as described in ARM document number ARM DEN 0022A.
 */

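/*
 * AFFINITY_MASK(level) keeps the MPIDR affinity fields at and above
 * the given level. As a worked example (assuming MPIDR_LEVEL_BITS is
 * 8, matching the 8-bit MPIDR affinity fields): AFFINITY_MASK(1) is
 * ~0xffUL, which clears Aff0 and preserves Aff1 and above.
 */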
#define AFFINITY_MASK(level)	~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)

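/*
 * Return the MPIDR bits covered by @affinity_level and above, limited
 * to the hardware ID bits. Levels above 3 yield a zero mask; note that
 * on 32-bit ARM, where MPIDR_HWID_BITMASK appears to cover only
 * Aff0-Aff2, level 3 also yields zero and is therefore treated as
 * invalid by the callers.
 */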
static unsigned long psci_affinity_mask(unsigned long affinity_level)
{
	if (affinity_level <= 3)
		return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);

	return 0;
}

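/*
 * CPU_OFF simply marks the VCPU as paused; the run loop is then
 * expected to block the VCPU on its wait queue until a later CPU_ON
 * call (kvm_psci_vcpu_on below) clears the flag and wakes it up.
 */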
static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pause = true;
}

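/*
 * CPU_ON: r1 holds the target MPIDR, r2 the physical entry point.
 * (PSCI 0.2 also defines a context ID argument in r3; this version of
 * the code does not forward it to the target VCPU.)
 */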
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
	struct kvm *kvm = source_vcpu->kvm;
	struct kvm_vcpu *vcpu = NULL, *tmp;
	wait_queue_head_t *wq;
	unsigned long cpu_id;
	unsigned long mpidr;
	phys_addr_t target_pc;
	int i;

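	/*
	 * For a 32-bit caller only the lower 32 bits of the register
	 * are significant: ~((u32) 0) promotes to 0xffffffffUL, so the
	 * AND below truncates cpu_id to 32 bits. This matters because
	 * this file is, in trees of this era, also built for arm64,
	 * where unsigned long is 64 bits wide.
	 */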
	cpu_id = *vcpu_reg(source_vcpu, 1);
	if (vcpu_mode_is_32bit(source_vcpu))
		cpu_id &= ~((u32) 0);

	kvm_for_each_vcpu(i, tmp, kvm) {
		mpidr = kvm_vcpu_get_mpidr(tmp);
		if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) {
			vcpu = tmp;
			break;
		}
	}

	/*
	 * Make sure the caller requested a valid CPU and that the CPU is
	 * turned off.
	 */
	if (!vcpu || !vcpu->arch.pause)
		return PSCI_RET_INVALID_PARAMS;

	target_pc = *vcpu_reg(source_vcpu, 2);

	kvm_reset_vcpu(vcpu);

	/* Gracefully handle Thumb2 entry point */
	if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
		target_pc &= ~((phys_addr_t) 1);
		vcpu_set_thumb(vcpu);
	}

	/* Propagate caller endianness */
	if (kvm_vcpu_is_be(source_vcpu))
		kvm_vcpu_set_be(vcpu);

	*vcpu_pc(vcpu) = target_pc;
	vcpu->arch.pause = false;
	smp_mb();		/* Make sure the above is visible */

	wq = kvm_arch_vcpu_wq(vcpu);
	wake_up_interruptible(wq);

	return PSCI_RET_SUCCESS;
}
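
/*
 * For illustration only: a PSCI 0.2 guest typically requests CPU_ON
 * with an HVC, along the lines of (32-bit function ID 0x84000003):
 *
 *	ldr	r0, =0x84000003		@ PSCI_0_2_FN_CPU_ON
 *	mov	r1, #1			@ target MPIDR
 *	ldr	r2, =secondary_entry	@ physical entry point
 *	hvc	#0
 *
 * "secondary_entry" is a hypothetical guest symbol; the trap lands in
 * kvm_psci_call() below.
 */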
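
/*
 * AFFINITY_INFO: r1 holds the target affinity value, r2 the lowest
 * affinity level that r1 describes. Reports ON if at least one
 * matching VCPU is running, OFF otherwise.
 */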
static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
	int i;
	unsigned long mpidr;
	unsigned long target_affinity;
	unsigned long target_affinity_mask;
	unsigned long lowest_affinity_level;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp;

	target_affinity = *vcpu_reg(vcpu, 1);
	lowest_affinity_level = *vcpu_reg(vcpu, 2);

	/* Determine target affinity mask */
	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
	if (!target_affinity_mask)
		return PSCI_RET_INVALID_PARAMS;

	/* Ignore other bits of target affinity */
	target_affinity &= target_affinity_mask;

	/*
	 * Report ON if at least one VCPU matching the target affinity
	 * is running, OFF otherwise.
	 */
	kvm_for_each_vcpu(i, tmp, kvm) {
		mpidr = kvm_vcpu_get_mpidr(tmp);
		if (((mpidr & target_affinity_mask) == target_affinity) &&
		    !tmp->arch.pause) {
			return PSCI_0_2_AFFINITY_LEVEL_ON;
		}
	}

	return PSCI_0_2_AFFINITY_LEVEL_OFF;
}

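/*
 * SYSTEM_OFF and SYSTEM_RESET cannot be handled inside the kernel:
 * they are turned into a KVM_EXIT_SYSTEM_EVENT exit, so the current
 * KVM_RUN ioctl returns to user space, which is expected to tear down
 * or reset the VM.
 */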
static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
	vcpu->run->system_event.type = type;
	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
}

static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}
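
/*
 * Sketch of the matching user-space side (not part of this file),
 * assuming a VMM with hypothetical powerdown()/reboot() helpers:
 *
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	if (run->exit_reason == KVM_EXIT_SYSTEM_EVENT) {
 *		switch (run->system_event.type) {
 *		case KVM_SYSTEM_EVENT_SHUTDOWN:
 *			powerdown();
 *			break;
 *		case KVM_SYSTEM_EVENT_RESET:
 *			reboot();
 *			break;
 *		}
 *	}
 */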
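
/*
 * User space opts in to PSCI 0.2 by setting the KVM_ARM_VCPU_PSCI_0_2
 * feature bit in the KVM_ARM_VCPU_INIT ioctl; without it the VCPU
 * falls back to the older KVM-specific 0.1 interface.
 */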
int kvm_psci_version(struct kvm_vcpu *vcpu)
{
	if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
		return KVM_ARM_PSCI_0_2;

	return KVM_ARM_PSCI_0_1;
}

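/*
 * PSCI 0.2 function IDs follow the SMC calling convention: the 32-bit
 * IDs live in the 0x84000000 range and the 64-bit ones in 0xC4000000
 * (the PSCI_0_2_FN and PSCI_0_2_FN64 constants come from
 * uapi/linux/psci.h).
 */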
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
	int ret = 1;
	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
	unsigned long val;

	switch (psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		/*
		 * Bits[31:16] = Major Version = 0
		 * Bits[15:0] = Minor Version = 2
		 */
		val = 2;
		break;
	case PSCI_0_2_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case PSCI_0_2_FN_CPU_ON:
	case PSCI_0_2_FN64_CPU_ON:
		val = kvm_psci_vcpu_on(vcpu);
		break;
	case PSCI_0_2_FN_AFFINITY_INFO:
	case PSCI_0_2_FN64_AFFINITY_INFO:
		val = kvm_psci_vcpu_affinity_info(vcpu);
		break;
	case PSCI_0_2_FN_SYSTEM_OFF:
		kvm_psci_system_off(vcpu);
		/*
		 * We shouldn't be going back to the guest VCPU after
		 * receiving a SYSTEM_OFF request.
		 *
		 * If user space accidentally or deliberately resumes the
		 * guest VCPU after a SYSTEM_OFF request, the guest should
		 * see an internal failure in the PSCI return value. To
		 * achieve this, we preload r0 (or x0) with the PSCI
		 * return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	case PSCI_0_2_FN_SYSTEM_RESET:
		kvm_psci_system_reset(vcpu);
		/*
		 * Same reason as SYSTEM_OFF for preloading r0 (or x0)
		 * with PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	case PSCI_0_2_FN_CPU_SUSPEND:
	case PSCI_0_2_FN_MIGRATE:
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
	case PSCI_0_2_FN_MIGRATE_INFO_UP_CPU:
	case PSCI_0_2_FN64_CPU_SUSPEND:
	case PSCI_0_2_FN64_MIGRATE:
	case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	default:
		return -EINVAL;
	}

	*vcpu_reg(vcpu, 0) = val;
	return ret;
}
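
/*
 * The 0.1 interface predates the standard function IDs: the
 * KVM_PSCI_FN_ numbers are KVM-specific values from the arm uapi
 * headers rather than SMC calling convention IDs.
 */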
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
	unsigned long val;

	switch (psci_fn) {
	case KVM_PSCI_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case KVM_PSCI_FN_CPU_ON:
		val = kvm_psci_vcpu_on(vcpu);
		break;
	case KVM_PSCI_FN_CPU_SUSPEND:
	case KVM_PSCI_FN_MIGRATE:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	default:
		return -EINVAL;
	}

	*vcpu_reg(vcpu, 0) = val;
	return 1;
}

/**
 * kvm_psci_call - handle PSCI call if r0 value is in range
 * @vcpu: Pointer to the VCPU struct
 *
 * Handle PSCI calls from guests through traps from HVC instructions.
 * The calling convention is similar to SMC calls to the secure world
 * where the function number is placed in r0.
 *
 * This function returns: > 0 (success and resume the guest), 0 (success
 * but exit to user space), and < 0 (errors)
 *
 * Errors:
 * -EINVAL: Unrecognized PSCI function
 */
int kvm_psci_call(struct kvm_vcpu *vcpu)
{
	switch (kvm_psci_version(vcpu)) {
	case KVM_ARM_PSCI_0_2:
		return kvm_psci_0_2_call(vcpu);
	case KVM_ARM_PSCI_0_1:
		return kvm_psci_0_1_call(vcpu);
	default:
		return -EINVAL;
	}
}