arm64: dts: rockchip: amend usb-otg related nodes for rk3368-tb
[firefly-linux-kernel-4.4.55.git] / arch / arm64 / kernel / insn.c
1 /*
2  * Copyright (C) 2013 Huawei Ltd.
3  * Author: Jiang Liu <liuj97@gmail.com>
4  *
5  * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 #include <linux/bitops.h>
20 #include <linux/bug.h>
21 #include <linux/compiler.h>
22 #include <linux/kernel.h>
23 #include <linux/mm.h>
24 #include <linux/smp.h>
25 #include <linux/spinlock.h>
26 #include <linux/stop_machine.h>
27 #include <linux/types.h>
28 #include <linux/uaccess.h>
29
30 #include <asm/cacheflush.h>
31 #include <asm/debug-monitors.h>
32 #include <asm/fixmap.h>
33 #include <asm/opcodes.h>
34 #include <asm/insn.h>
35
36 #define AARCH64_INSN_SF_BIT     BIT(31)
37 #define AARCH64_INSN_N_BIT      BIT(22)
38
/*
 * Coarse decode table indexed by bits [28:25] of an A64 instruction
 * (see aarch64_get_insn_class() below), mapping the top-level op0 field
 * to an encoding class.
 */
static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,	/* 0b0000 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0b0001 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0b0010 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0b0011 */
	AARCH64_INSN_CLS_LDST,		/* 0b0100 */
	AARCH64_INSN_CLS_DP_REG,	/* 0b0101 */
	AARCH64_INSN_CLS_LDST,		/* 0b0110 */
	AARCH64_INSN_CLS_DP_FPSIMD,	/* 0b0111 */
	AARCH64_INSN_CLS_DP_IMM,	/* 0b1000 */
	AARCH64_INSN_CLS_DP_IMM,	/* 0b1001 */
	AARCH64_INSN_CLS_BR_SYS,	/* 0b1010 */
	AARCH64_INSN_CLS_BR_SYS,	/* 0b1011 */
	AARCH64_INSN_CLS_LDST,		/* 0b1100 */
	AARCH64_INSN_CLS_DP_REG,	/* 0b1101 */
	AARCH64_INSN_CLS_LDST,		/* 0b1110 */
	AARCH64_INSN_CLS_DP_FPSIMD,	/* 0b1111 */
};
57
58 enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
59 {
60         return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
61 }
62
63 /* NOP is an alias of HINT */
64 bool __kprobes aarch64_insn_is_nop(u32 insn)
65 {
66         if (!aarch64_insn_is_hint(insn))
67                 return false;
68
69         switch (insn & 0xFE0) {
70         case AARCH64_INSN_HINT_YIELD:
71         case AARCH64_INSN_HINT_WFE:
72         case AARCH64_INSN_HINT_WFI:
73         case AARCH64_INSN_HINT_SEV:
74         case AARCH64_INSN_HINT_SEVL:
75                 return false;
76         default:
77                 return true;
78         }
79 }
80
81 bool aarch64_insn_is_branch_imm(u32 insn)
82 {
83         return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
84                 aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
85                 aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
86                 aarch64_insn_is_bcond(insn));
87 }
88
/* Serializes users of the FIX_TEXT_POKE0 slot (see __aarch64_insn_write()). */
static DEFINE_RAW_SPINLOCK(patch_lock);
90
91 static void __kprobes *patch_map(void *addr, int fixmap)
92 {
93         unsigned long uintaddr = (uintptr_t) addr;
94         bool module = !core_kernel_text(uintaddr);
95         struct page *page;
96
97         if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
98                 page = vmalloc_to_page(addr);
99         else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
100                 page = virt_to_page(addr);
101         else
102                 return addr;
103
104         BUG_ON(!page);
105         return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
106                         (uintaddr & ~PAGE_MASK));
107 }
108
/* Tear down the temporary mapping installed by patch_map(). */
static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
113 /*
114  * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
115  * little-endian.
116  */
117 int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
118 {
119         int ret;
120         u32 val;
121
122         ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
123         if (!ret)
124                 *insnp = le32_to_cpu(val);
125
126         return ret;
127 }
128
129 static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
130 {
131         void *waddr = addr;
132         unsigned long flags = 0;
133         int ret;
134
135         raw_spin_lock_irqsave(&patch_lock, flags);
136         waddr = patch_map(addr, FIX_TEXT_POKE0);
137
138         ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
139
140         patch_unmap(FIX_TEXT_POKE0);
141         raw_spin_unlock_irqrestore(&patch_lock, flags);
142
143         return ret;
144 }
145
146 int __kprobes aarch64_insn_write(void *addr, u32 insn)
147 {
148         insn = cpu_to_le32(insn);
149         return __aarch64_insn_write(addr, insn);
150 }
151
152 static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
153 {
154         if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
155                 return false;
156
157         return  aarch64_insn_is_b(insn) ||
158                 aarch64_insn_is_bl(insn) ||
159                 aarch64_insn_is_svc(insn) ||
160                 aarch64_insn_is_hvc(insn) ||
161                 aarch64_insn_is_smc(insn) ||
162                 aarch64_insn_is_brk(insn) ||
163                 aarch64_insn_is_nop(insn);
164 }
165
166 bool __kprobes aarch64_insn_uses_literal(u32 insn)
167 {
168         /* ldr/ldrsw (literal), prfm */
169
170         return aarch64_insn_is_ldr_lit(insn) ||
171                 aarch64_insn_is_ldrsw_lit(insn) ||
172                 aarch64_insn_is_adr_adrp(insn) ||
173                 aarch64_insn_is_prfm_lit(insn);
174 }
175
176 bool __kprobes aarch64_insn_is_branch(u32 insn)
177 {
178         /* b, bl, cb*, tb*, b.cond, br, blr */
179
180         return aarch64_insn_is_b(insn) ||
181                 aarch64_insn_is_bl(insn) ||
182                 aarch64_insn_is_cbz(insn) ||
183                 aarch64_insn_is_cbnz(insn) ||
184                 aarch64_insn_is_tbz(insn) ||
185                 aarch64_insn_is_tbnz(insn) ||
186                 aarch64_insn_is_ret(insn) ||
187                 aarch64_insn_is_br(insn) ||
188                 aarch64_insn_is_blr(insn) ||
189                 aarch64_insn_is_bcond(insn);
190 }
191
/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where the instruction before modification
 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
 * or SMC instruction.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
	/* Both the old and the new encoding must be in the safe set. */
	return __aarch64_insn_hotpatch_safe(old_insn) &&
	       __aarch64_insn_hotpatch_safe(new_insn);
}
207
208 int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
209 {
210         u32 *tp = addr;
211         int ret;
212
213         /* A64 instructions must be word aligned */
214         if ((uintptr_t)tp & 0x3)
215                 return -EINVAL;
216
217         ret = aarch64_insn_write(tp, insn);
218         if (ret == 0)
219                 flush_icache_range((uintptr_t)tp,
220                                    (uintptr_t)tp + AARCH64_INSN_SIZE);
221
222         return ret;
223 }
224
/* Work item shared across CPUs by aarch64_insn_patch_text_cb(). */
struct aarch64_insn_patch {
	void		**text_addrs;	/* addresses to patch */
	u32		*new_insns;	/* replacement instructions */
	int		insn_cnt;	/* number of entries above */
	atomic_t	cpu_count;	/* CPU rendezvous counter */
};
231
/*
 * stop_machine() callback: the first CPU to arrive does all the patching
 * while the remaining CPUs spin until it signals completion, then resync
 * their instruction streams with an ISB.
 */
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		/* Stop on the first failed write. */
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		/*
		 * Wait for the master's extra increment to push the count
		 * past num_online_cpus() before continuing.
		 */
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}
257
258 int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
259 {
260         struct aarch64_insn_patch patch = {
261                 .text_addrs = addrs,
262                 .new_insns = insns,
263                 .insn_cnt = cnt,
264                 .cpu_count = ATOMIC_INIT(0),
265         };
266
267         if (cnt <= 0)
268                 return -EINVAL;
269
270         return stop_machine(aarch64_insn_patch_text_cb, &patch,
271                             cpu_online_mask);
272 }
273
/*
 * Patch @cnt instructions at @addrs.  A single hotpatch-safe replacement
 * takes the fast path (in-place write plus a cross-CPU resync IPI);
 * everything else falls back to stop_machine()-based patching.
 */
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
301
302 static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
303                                                 u32 *maskp, int *shiftp)
304 {
305         u32 mask;
306         int shift;
307
308         switch (type) {
309         case AARCH64_INSN_IMM_26:
310                 mask = BIT(26) - 1;
311                 shift = 0;
312                 break;
313         case AARCH64_INSN_IMM_19:
314                 mask = BIT(19) - 1;
315                 shift = 5;
316                 break;
317         case AARCH64_INSN_IMM_16:
318                 mask = BIT(16) - 1;
319                 shift = 5;
320                 break;
321         case AARCH64_INSN_IMM_14:
322                 mask = BIT(14) - 1;
323                 shift = 5;
324                 break;
325         case AARCH64_INSN_IMM_12:
326                 mask = BIT(12) - 1;
327                 shift = 10;
328                 break;
329         case AARCH64_INSN_IMM_9:
330                 mask = BIT(9) - 1;
331                 shift = 12;
332                 break;
333         case AARCH64_INSN_IMM_7:
334                 mask = BIT(7) - 1;
335                 shift = 15;
336                 break;
337         case AARCH64_INSN_IMM_6:
338         case AARCH64_INSN_IMM_S:
339                 mask = BIT(6) - 1;
340                 shift = 10;
341                 break;
342         case AARCH64_INSN_IMM_R:
343                 mask = BIT(6) - 1;
344                 shift = 16;
345                 break;
346         default:
347                 return -EINVAL;
348         }
349
350         *maskp = mask;
351         *shiftp = shift;
352
353         return 0;
354 }
355
/*
 * ADR/ADRP immediates are split across the instruction word: a 2-bit
 * immlo field at bit 29 and a 19-bit immhi field at bit 5, together
 * forming a 21-bit value (ADR_IMM_SIZE encodings).
 */
#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5
362
363 u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
364 {
365         u32 immlo, immhi, mask;
366         int shift;
367
368         switch (type) {
369         case AARCH64_INSN_IMM_ADR:
370                 shift = 0;
371                 immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
372                 immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
373                 insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
374                 mask = ADR_IMM_SIZE - 1;
375                 break;
376         default:
377                 if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
378                         pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
379                                type);
380                         return 0;
381                 }
382         }
383
384         return (insn >> shift) & mask;
385 }
386
387 u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
388                                   u32 insn, u64 imm)
389 {
390         u32 immlo, immhi, mask;
391         int shift;
392
393         switch (type) {
394         case AARCH64_INSN_IMM_ADR:
395                 shift = 0;
396                 immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
397                 imm >>= ADR_IMM_HILOSPLIT;
398                 immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
399                 imm = immlo | immhi;
400                 mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
401                         (ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
402                 break;
403         default:
404                 if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
405                         pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
406                                type);
407                         return 0;
408                 }
409         }
410
411         /* Update the immediate field. */
412         insn &= ~(mask << shift);
413         insn |= (imm & mask) << shift;
414
415         return insn;
416 }
417
/*
 * Place register @reg into the @type field (Rt/Rd/Rn/Rt2/Ra/Rm) of @insn.
 * Returns the updated instruction, or 0 (with a pr_err) on a bad register
 * or field type.
 */
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return 0;
	}

	/* Map the field type to its bit position in the instruction. */
	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	/* Register fields are 5 bits wide. */
	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}
455
456 static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
457                                          u32 insn)
458 {
459         u32 size;
460
461         switch (type) {
462         case AARCH64_INSN_SIZE_8:
463                 size = 0;
464                 break;
465         case AARCH64_INSN_SIZE_16:
466                 size = 1;
467                 break;
468         case AARCH64_INSN_SIZE_32:
469                 size = 2;
470                 break;
471         case AARCH64_INSN_SIZE_64:
472                 size = 3;
473                 break;
474         default:
475                 pr_err("%s: unknown size encoding %d\n", __func__, type);
476                 return 0;
477         }
478
479         insn &= ~GENMASK(31, 30);
480         insn |= size << 30;
481
482         return insn;
483 }
484
/*
 * Compute the byte offset from @pc to @addr for a PC-relative branch,
 * BUG()ing if either address is unaligned or the offset falls outside
 * [-range, range).
 */
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	/*
	 * PC: A 64-bit Program Counter holding the address of the current
	 * instruction. A64 instructions must be word-aligned.
	 */
	BUG_ON((pc & 0x3) || (addr & 0x3));

	offset = ((long)addr - (long)pc);
	/* Asymmetric on purpose: -range is encodable, +range is not. */
	BUG_ON(offset < -range || offset >= range);

	return offset;
}
501
502 u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
503                                           enum aarch64_insn_branch_type type)
504 {
505         u32 insn;
506         long offset;
507
508         /*
509          * B/BL support [-128M, 128M) offset
510          * ARM64 virtual address arrangement guarantees all kernel and module
511          * texts are within +/-128M.
512          */
513         offset = branch_imm_common(pc, addr, SZ_128M);
514
515         switch (type) {
516         case AARCH64_INSN_BRANCH_LINK:
517                 insn = aarch64_insn_get_bl_value();
518                 break;
519         case AARCH64_INSN_BRANCH_NOLINK:
520                 insn = aarch64_insn_get_b_value();
521                 break;
522         default:
523                 BUG_ON(1);
524                 return AARCH64_BREAK_FAULT;
525         }
526
527         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
528                                              offset >> 2);
529 }
530
/*
 * Generate a CBZ/CBNZ testing @reg and branching from @pc to @addr
 * (imm19: within +/-1MB).
 */
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* The sf bit selects the 64-bit register form. */
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	/* imm19 carries the offset in 4-byte units. */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
569
570 u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
571                                      enum aarch64_insn_condition cond)
572 {
573         u32 insn;
574         long offset;
575
576         offset = branch_imm_common(pc, addr, SZ_1M);
577
578         insn = aarch64_insn_get_bcond_value();
579
580         BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
581         insn |= cond;
582
583         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
584                                              offset >> 2);
585 }
586
/* Build a HINT instruction carrying operand @op. */
u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}
591
/* NOP is the HINT instruction with the NOP operand. */
u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
596
597 u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
598                                 enum aarch64_insn_branch_type type)
599 {
600         u32 insn;
601
602         switch (type) {
603         case AARCH64_INSN_BRANCH_NOLINK:
604                 insn = aarch64_insn_get_br_value();
605                 break;
606         case AARCH64_INSN_BRANCH_LINK:
607                 insn = aarch64_insn_get_blr_value();
608                 break;
609         case AARCH64_INSN_BRANCH_RETURN:
610                 insn = aarch64_insn_get_ret_value();
611                 break;
612         default:
613                 BUG_ON(1);
614                 return AARCH64_BREAK_FAULT;
615         }
616
617         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
618 }
619
/*
 * Generate a register-offset LDR/STR: data register @reg, base address in
 * @base, register offset in @offset, access width @size.
 */
u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	/* Rt = data, Rn = base, Rm = register offset. */
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}
650
/*
 * Generate an LDP/STP of @reg1/@reg2 at [@base, #@offset] with pre- or
 * post-index writeback.  @offset must be a multiple of the per-register
 * access size and fit the scaled 7-bit signed immediate.
 */
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;	/* log2 of the per-register access size */

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		/* offset must be multiples of 4 in the range [-256, 252] */
		BUG_ON(offset & 0x3);
		BUG_ON(offset < -256 || offset > 252);
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* offset must be multiples of 8 in the range [-512, 504] */
		BUG_ON(offset & 0x7);
		BUG_ON(offset < -512 || offset > 504);
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	/* imm7 holds the offset scaled down by the access size. */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
710
/*
 * Generate an ADD/SUB (immediate), optionally flag-setting:
 * "dst = src +/- imm".  @imm must fit the unsigned 12-bit field.
 */
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	/* imm12 is unsigned: reject anything outside [0, 4095]. */
	BUG_ON(imm & ~(SZ_4K - 1));

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}
755
/*
 * Generate a bitfield move (BFM/UBFM/SBFM) with rotation @immr and source
 * width hint @imms, from @src into @dst.
 */
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;	/* maximum encodable immr/imms for the variant */

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* The 64-bit form needs both the sf and N bits set. */
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	BUG_ON(immr & ~mask);
	BUG_ON(imms & ~mask);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
804
/*
 * Generate a MOVZ/MOVK/MOVN placing the 16-bit @imm into @dst at bit
 * position @shift (which must be a multiple of 16 valid for the variant).
 */
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	/* imm16 must fit in 16 bits. */
	BUG_ON(imm & ~(SZ_64K - 1));

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift != 0 && shift != 16);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
		       shift != 48);
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	/* The hw field at bit 21 selects the 16-bit slice (shift / 16). */
	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
849
850 u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
851                                          enum aarch64_insn_register src,
852                                          enum aarch64_insn_register reg,
853                                          int shift,
854                                          enum aarch64_insn_variant variant,
855                                          enum aarch64_insn_adsb_type type)
856 {
857         u32 insn;
858
859         switch (type) {
860         case AARCH64_INSN_ADSB_ADD:
861                 insn = aarch64_insn_get_add_value();
862                 break;
863         case AARCH64_INSN_ADSB_SUB:
864                 insn = aarch64_insn_get_sub_value();
865                 break;
866         case AARCH64_INSN_ADSB_ADD_SETFLAGS:
867                 insn = aarch64_insn_get_adds_value();
868                 break;
869         case AARCH64_INSN_ADSB_SUB_SETFLAGS:
870                 insn = aarch64_insn_get_subs_value();
871                 break;
872         default:
873                 BUG_ON(1);
874                 return AARCH64_BREAK_FAULT;
875         }
876
877         switch (variant) {
878         case AARCH64_INSN_VARIANT_32BIT:
879                 BUG_ON(shift & ~(SZ_32 - 1));
880                 break;
881         case AARCH64_INSN_VARIANT_64BIT:
882                 insn |= AARCH64_INSN_SF_BIT;
883                 BUG_ON(shift & ~(SZ_64 - 1));
884                 break;
885         default:
886                 BUG_ON(1);
887                 return AARCH64_BREAK_FAULT;
888         }
889
890
891         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
892
893         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
894
895         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
896
897         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
898 }
899
900 u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
901                            enum aarch64_insn_register src,
902                            enum aarch64_insn_variant variant,
903                            enum aarch64_insn_data1_type type)
904 {
905         u32 insn;
906
907         switch (type) {
908         case AARCH64_INSN_DATA1_REVERSE_16:
909                 insn = aarch64_insn_get_rev16_value();
910                 break;
911         case AARCH64_INSN_DATA1_REVERSE_32:
912                 insn = aarch64_insn_get_rev32_value();
913                 break;
914         case AARCH64_INSN_DATA1_REVERSE_64:
915                 BUG_ON(variant != AARCH64_INSN_VARIANT_64BIT);
916                 insn = aarch64_insn_get_rev64_value();
917                 break;
918         default:
919                 BUG_ON(1);
920                 return AARCH64_BREAK_FAULT;
921         }
922
923         switch (variant) {
924         case AARCH64_INSN_VARIANT_32BIT:
925                 break;
926         case AARCH64_INSN_VARIANT_64BIT:
927                 insn |= AARCH64_INSN_SF_BIT;
928                 break;
929         default:
930                 BUG_ON(1);
931                 return AARCH64_BREAK_FAULT;
932         }
933
934         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
935
936         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
937 }
938
939 u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
940                            enum aarch64_insn_register src,
941                            enum aarch64_insn_register reg,
942                            enum aarch64_insn_variant variant,
943                            enum aarch64_insn_data2_type type)
944 {
945         u32 insn;
946
947         switch (type) {
948         case AARCH64_INSN_DATA2_UDIV:
949                 insn = aarch64_insn_get_udiv_value();
950                 break;
951         case AARCH64_INSN_DATA2_SDIV:
952                 insn = aarch64_insn_get_sdiv_value();
953                 break;
954         case AARCH64_INSN_DATA2_LSLV:
955                 insn = aarch64_insn_get_lslv_value();
956                 break;
957         case AARCH64_INSN_DATA2_LSRV:
958                 insn = aarch64_insn_get_lsrv_value();
959                 break;
960         case AARCH64_INSN_DATA2_ASRV:
961                 insn = aarch64_insn_get_asrv_value();
962                 break;
963         case AARCH64_INSN_DATA2_RORV:
964                 insn = aarch64_insn_get_rorv_value();
965                 break;
966         default:
967                 BUG_ON(1);
968                 return AARCH64_BREAK_FAULT;
969         }
970
971         switch (variant) {
972         case AARCH64_INSN_VARIANT_32BIT:
973                 break;
974         case AARCH64_INSN_VARIANT_64BIT:
975                 insn |= AARCH64_INSN_SF_BIT;
976                 break;
977         default:
978                 BUG_ON(1);
979                 return AARCH64_BREAK_FAULT;
980         }
981
982         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
983
984         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
985
986         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
987 }
988
989 u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
990                            enum aarch64_insn_register src,
991                            enum aarch64_insn_register reg1,
992                            enum aarch64_insn_register reg2,
993                            enum aarch64_insn_variant variant,
994                            enum aarch64_insn_data3_type type)
995 {
996         u32 insn;
997
998         switch (type) {
999         case AARCH64_INSN_DATA3_MADD:
1000                 insn = aarch64_insn_get_madd_value();
1001                 break;
1002         case AARCH64_INSN_DATA3_MSUB:
1003                 insn = aarch64_insn_get_msub_value();
1004                 break;
1005         default:
1006                 BUG_ON(1);
1007                 return AARCH64_BREAK_FAULT;
1008         }
1009
1010         switch (variant) {
1011         case AARCH64_INSN_VARIANT_32BIT:
1012                 break;
1013         case AARCH64_INSN_VARIANT_64BIT:
1014                 insn |= AARCH64_INSN_SF_BIT;
1015                 break;
1016         default:
1017                 BUG_ON(1);
1018                 return AARCH64_BREAK_FAULT;
1019         }
1020
1021         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1022
1023         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);
1024
1025         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
1026                                             reg1);
1027
1028         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
1029                                             reg2);
1030 }
1031
1032 u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
1033                                          enum aarch64_insn_register src,
1034                                          enum aarch64_insn_register reg,
1035                                          int shift,
1036                                          enum aarch64_insn_variant variant,
1037                                          enum aarch64_insn_logic_type type)
1038 {
1039         u32 insn;
1040
1041         switch (type) {
1042         case AARCH64_INSN_LOGIC_AND:
1043                 insn = aarch64_insn_get_and_value();
1044                 break;
1045         case AARCH64_INSN_LOGIC_BIC:
1046                 insn = aarch64_insn_get_bic_value();
1047                 break;
1048         case AARCH64_INSN_LOGIC_ORR:
1049                 insn = aarch64_insn_get_orr_value();
1050                 break;
1051         case AARCH64_INSN_LOGIC_ORN:
1052                 insn = aarch64_insn_get_orn_value();
1053                 break;
1054         case AARCH64_INSN_LOGIC_EOR:
1055                 insn = aarch64_insn_get_eor_value();
1056                 break;
1057         case AARCH64_INSN_LOGIC_EON:
1058                 insn = aarch64_insn_get_eon_value();
1059                 break;
1060         case AARCH64_INSN_LOGIC_AND_SETFLAGS:
1061                 insn = aarch64_insn_get_ands_value();
1062                 break;
1063         case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
1064                 insn = aarch64_insn_get_bics_value();
1065                 break;
1066         default:
1067                 BUG_ON(1);
1068                 return AARCH64_BREAK_FAULT;
1069         }
1070
1071         switch (variant) {
1072         case AARCH64_INSN_VARIANT_32BIT:
1073                 BUG_ON(shift & ~(SZ_32 - 1));
1074                 break;
1075         case AARCH64_INSN_VARIANT_64BIT:
1076                 insn |= AARCH64_INSN_SF_BIT;
1077                 BUG_ON(shift & ~(SZ_64 - 1));
1078                 break;
1079         default:
1080                 BUG_ON(1);
1081                 return AARCH64_BREAK_FAULT;
1082         }
1083
1084
1085         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1086
1087         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1088
1089         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1090
1091         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
1092 }
1093
/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 *
 * Only immediate-offset branches (B/BL, CBZ/CBNZ, B.cond, TBZ/TBNZ) are
 * handled; anything else hits the BUG() at the end.
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		/*
		 * Shift the 26-bit field up so its top bit lands in bit
		 * 31, then arithmetic-shift back down two bits less: this
		 * sign-extends and multiplies by 4 (instruction size) in
		 * one expression.  Same trick for the narrower fields
		 * below, with shift counts adjusted to the field width.
		 */
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		/* 19-bit immediate */
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		/* 14-bit immediate */
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}
1122
/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 *
 * @offset is a byte offset; ">> 2" converts it to the instruction-count
 * units the immediate fields use.  The caller is expected to pass an
 * offset that fits the target field (NOTE(review): range checking, if
 * any, happens inside aarch64_insn_encode_immediate — not visible here).
 * Non-immediate branches hit the BUG() at the end.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}
1145
1146 /*
1147  * Extract the Op/CR data from a msr/mrs instruction.
1148  */
1149 u32 aarch64_insn_extract_system_reg(u32 insn)
1150 {
1151         return (insn & 0x1FFFE0) >> 5;
1152 }
1153
1154 bool aarch32_insn_is_wide(u32 insn)
1155 {
1156         return insn >= 0xe800;
1157 }
1158
1159 /*
1160  * Macros/defines for extracting register numbers from instruction.
1161  */
1162 u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
1163 {
1164         return (insn & (0xf << offset)) >> offset;
1165 }
1166
1167 #define OPC2_MASK       0x7
1168 #define OPC2_OFFSET     5
1169 u32 aarch32_insn_mcr_extract_opc2(u32 insn)
1170 {
1171         return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
1172 }
1173
1174 #define CRM_MASK        0xf
1175 u32 aarch32_insn_mcr_extract_crm(u32 insn)
1176 {
1177         return insn & CRM_MASK;
1178 }
1179
1180 static bool __kprobes __check_eq(unsigned long pstate)
1181 {
1182         return (pstate & PSR_Z_BIT) != 0;
1183 }
1184
1185 static bool __kprobes __check_ne(unsigned long pstate)
1186 {
1187         return (pstate & PSR_Z_BIT) == 0;
1188 }
1189
1190 static bool __kprobes __check_cs(unsigned long pstate)
1191 {
1192         return (pstate & PSR_C_BIT) != 0;
1193 }
1194
1195 static bool __kprobes __check_cc(unsigned long pstate)
1196 {
1197         return (pstate & PSR_C_BIT) == 0;
1198 }
1199
1200 static bool __kprobes __check_mi(unsigned long pstate)
1201 {
1202         return (pstate & PSR_N_BIT) != 0;
1203 }
1204
1205 static bool __kprobes __check_pl(unsigned long pstate)
1206 {
1207         return (pstate & PSR_N_BIT) == 0;
1208 }
1209
1210 static bool __kprobes __check_vs(unsigned long pstate)
1211 {
1212         return (pstate & PSR_V_BIT) != 0;
1213 }
1214
1215 static bool __kprobes __check_vc(unsigned long pstate)
1216 {
1217         return (pstate & PSR_V_BIT) == 0;
1218 }
1219
/*
 * HI: C set and Z clear.  Z sits one bit above C in PSTATE, so
 * "pstate >> 1" aligns Z with the C position; the and-not then clears
 * C whenever Z was set, leaving a single-bit test.
 */
static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}
1225
/*
 * LS: C clear or Z set — the logical inverse of HI, using the same
 * shift trick to fold Z into the C position.
 */
static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}
1231
/*
 * GE: N == V.  V sits three bits below N, so "pstate << 3" moves V
 * into the N position; the XOR leaves the N bit clear iff the original
 * N and V agreed.
 */
static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}
1237
/*
 * LT: N != V — the logical inverse of GE, using the same shift-and-XOR
 * trick to compare N against V.
 */
static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}
1243
/*
 * GT: Z clear and N == V.  Fold V (via XOR, as in GE) and then Z (via
 * OR, Z being one bit below N) into the N position, so a single bit
 * test covers both conditions.
 */
static bool __kprobes __check_gt(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}
1252
/*
 * LE: Z set or N != V — the logical inverse of GT, using the same
 * fold-into-N trick.
 */
static bool __kprobes __check_le(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}
1261
/* AL: always true, regardless of the flags. */
static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}
1266
/*
 * PSTATE condition checkers indexed by the 4-bit AArch32 condition-code
 * field (0b0000 = EQ through 0b1110 = AL).
 *
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al"), hence the duplicated final entry.
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};