arm64: Fix text patching logic when using fixmap
[firefly-linux-kernel-4.4.55.git] arch/arm64/kernel/insn.c
1 /*
2  * Copyright (C) 2013 Huawei Ltd.
3  * Author: Jiang Liu <liuj97@gmail.com>
4  *
5  * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 #include <linux/bitops.h>
20 #include <linux/bug.h>
21 #include <linux/compiler.h>
22 #include <linux/kernel.h>
23 #include <linux/mm.h>
24 #include <linux/smp.h>
25 #include <linux/spinlock.h>
26 #include <linux/stop_machine.h>
27 #include <linux/types.h>
28 #include <linux/uaccess.h>
29
30 #include <asm/cacheflush.h>
31 #include <asm/debug-monitors.h>
32 #include <asm/fixmap.h>
33 #include <asm/insn.h>
34
35 #define AARCH64_INSN_SF_BIT     BIT(31)
36 #define AARCH64_INSN_N_BIT      BIT(22)
37
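/*
 * Encoding-class lookup table, indexed by bits [28:25] of the instruction
 * (the top-level A64 encoding field in the ARMv8 ARM).  The index expression
 * matches aarch64_get_insn_class() below:
 *
 *	class = aarch64_insn_encoding_class[(insn >> 25) & 0xf];
 */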
38 static int aarch64_insn_encoding_class[] = {
39         AARCH64_INSN_CLS_UNKNOWN,
40         AARCH64_INSN_CLS_UNKNOWN,
41         AARCH64_INSN_CLS_UNKNOWN,
42         AARCH64_INSN_CLS_UNKNOWN,
43         AARCH64_INSN_CLS_LDST,
44         AARCH64_INSN_CLS_DP_REG,
45         AARCH64_INSN_CLS_LDST,
46         AARCH64_INSN_CLS_DP_FPSIMD,
47         AARCH64_INSN_CLS_DP_IMM,
48         AARCH64_INSN_CLS_DP_IMM,
49         AARCH64_INSN_CLS_BR_SYS,
50         AARCH64_INSN_CLS_BR_SYS,
51         AARCH64_INSN_CLS_LDST,
52         AARCH64_INSN_CLS_DP_REG,
53         AARCH64_INSN_CLS_LDST,
54         AARCH64_INSN_CLS_DP_FPSIMD,
55 };
56
57 enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
58 {
59         return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
60 }
61
62 /* NOP is an alias of HINT; any HINT other than the wait/event hints below is treated as a NOP */
63 bool __kprobes aarch64_insn_is_nop(u32 insn)
64 {
65         if (!aarch64_insn_is_hint(insn))
66                 return false;
67
68         switch (insn & 0xFE0) {
69         case AARCH64_INSN_HINT_YIELD:
70         case AARCH64_INSN_HINT_WFE:
71         case AARCH64_INSN_HINT_WFI:
72         case AARCH64_INSN_HINT_SEV:
73         case AARCH64_INSN_HINT_SEVL:
74                 return false;
75         default:
76                 return true;
77         }
78 }
79
80 static DEFINE_SPINLOCK(patch_lock);
81
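/*
 * When kernel text is mapped read-only (CONFIG_DEBUG_RODATA) or module text
 * is mapped read-only (CONFIG_DEBUG_SET_MODULE_RONX), instructions cannot be
 * written through their normal mapping.  patch_map() installs a temporary
 * writable alias of the target page at the FIX_TEXT_POKE0 fixmap slot and
 * returns the address to write through; patch_unmap() tears the alias down.
 * patch_lock serialises all users of this single fixmap slot.
 */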
82 static void __kprobes *patch_map(void *addr, int fixmap)
83 {
84         unsigned long uintaddr = (uintptr_t) addr;
85         bool module = !core_kernel_text(uintaddr);
86         struct page *page;
87
88         if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
89                 page = vmalloc_to_page(addr);
90         else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
91                 page = virt_to_page(addr);
92         else
93                 return addr;
94
95         BUG_ON(!page);
96         set_fixmap(fixmap, page_to_phys(page));
97
98         return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
99 }
100
101 static void __kprobes patch_unmap(int fixmap)
102 {
103         clear_fixmap(fixmap);
104 }
105 /*
106  * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
107  * little-endian.
108  */
109 int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
110 {
111         int ret;
112         u32 val;
113
114         ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
115         if (!ret)
116                 *insnp = le32_to_cpu(val);
117
118         return ret;
119 }
120
121 static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
122 {
123         void *waddr = addr;
124         unsigned long flags = 0;
125         int ret;
126
127         spin_lock_irqsave(&patch_lock, flags);
128         waddr = patch_map(addr, FIX_TEXT_POKE0);
129
130         ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
131
132         patch_unmap(FIX_TEXT_POKE0);
133         spin_unlock_irqrestore(&patch_lock, flags);
134
135         return ret;
136 }
137
138 int __kprobes aarch64_insn_write(void *addr, u32 insn)
139 {
140         insn = cpu_to_le32(insn);
141         return __aarch64_insn_write(addr, insn);
142 }
143
144 static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
145 {
146         if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
147                 return false;
148
149         return  aarch64_insn_is_b(insn) ||
150                 aarch64_insn_is_bl(insn) ||
151                 aarch64_insn_is_svc(insn) ||
152                 aarch64_insn_is_hvc(insn) ||
153                 aarch64_insn_is_smc(insn) ||
154                 aarch64_insn_is_brk(insn) ||
155                 aarch64_insn_is_nop(insn);
156 }
157
158 /*
159  * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
160  * Section B2.6.5 "Concurrent modification and execution of instructions":
161  * Concurrent modification and execution of instructions can lead to the
162  * resulting instruction performing any behavior that can be achieved by
163  * executing any sequence of instructions that can be executed from the
164  * same Exception level, except where the instruction before modification
165  * and the instruction after modification are each a B, BL, NOP, BKPT, SVC, HVC,
166  * or SMC instruction.
167  */
168 bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
169 {
170         return __aarch64_insn_hotpatch_safe(old_insn) &&
171                __aarch64_insn_hotpatch_safe(new_insn);
172 }
173
174 int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
175 {
176         u32 *tp = addr;
177         int ret;
178
179         /* A64 instructions must be word aligned */
180         if ((uintptr_t)tp & 0x3)
181                 return -EINVAL;
182
183         ret = aarch64_insn_write(tp, insn);
184         if (ret == 0)
185                 flush_icache_range((uintptr_t)tp,
186                                    (uintptr_t)tp + AARCH64_INSN_SIZE);
187
188         return ret;
189 }
190
191 struct aarch64_insn_patch {
192         void            **text_addrs;
193         u32             *new_insns;
194         int             insn_cnt;
195         atomic_t        cpu_count;
196 };
197
198 static int __kprobes aarch64_insn_patch_text_cb(void *arg)
199 {
200         int i, ret = 0;
201         struct aarch64_insn_patch *pp = arg;
202
203         /* The first CPU becomes master */
204         if (atomic_inc_return(&pp->cpu_count) == 1) {
205                 for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
206                         ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
207                                                              pp->new_insns[i]);
208                 /*
209                  * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
210                  * which ends with "dsb; isb" pair guaranteeing global
211                  * visibility.
212                  */
213                 /* Notify other processors with an additional increment. */
214                 atomic_inc(&pp->cpu_count);
215         } else {
216                 while (atomic_read(&pp->cpu_count) <= num_online_cpus())
217                         cpu_relax();
218                 isb();
219         }
220
221         return ret;
222 }
223
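/*
 * Patch a set of instructions under stop_machine(): every online CPU is
 * funnelled into aarch64_insn_patch_text_cb() above, the first CPU to arrive
 * performs the writes, and the rest spin until the master's extra cpu_count
 * increment signals completion before resynchronising with isb().
 */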
224 int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
225 {
226         struct aarch64_insn_patch patch = {
227                 .text_addrs = addrs,
228                 .new_insns = insns,
229                 .insn_cnt = cnt,
230                 .cpu_count = ATOMIC_INIT(0),
231         };
232
233         if (cnt <= 0)
234                 return -EINVAL;
235
236         return stop_machine(aarch64_insn_patch_text_cb, &patch,
237                             cpu_online_mask);
238 }
239
240 int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
241 {
242         int ret;
243         u32 insn;
244
245         /* Unsafe to patch multiple instructions without synchronization */
246         if (cnt == 1) {
247                 ret = aarch64_insn_read(addrs[0], &insn);
248                 if (ret)
249                         return ret;
250
251                 if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
252                         /*
253                          * ARMv8 architecture doesn't guarantee all CPUs see
254                          * the new instruction after returning from function
255                          * aarch64_insn_patch_text_nosync(). So send IPIs to
256                          * all other CPUs to achieve instruction
257                          * synchronization.
258                          */
259                         ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
260                         kick_all_cpus_sync();
261                         return ret;
262                 }
263         }
264
265         return aarch64_insn_patch_text_sync(addrs, insns, cnt);
266 }
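
/*
 * Illustrative sketch (not taken from an in-tree caller) of flipping a single
 * kernel instruction to a NOP with the helpers above; "addr" stands for a
 * hypothetical word-aligned address in patchable kernel text:
 *
 *	void *addrs[] = { addr };
 *	u32 insns[] = { aarch64_insn_gen_nop() };
 *	int err = aarch64_insn_patch_text(addrs, insns, 1);
 *
 * When both the old and the new instruction are hotpatch-safe (B, BL, NOP,
 * BRK, SVC, HVC, SMC), the single-instruction case above avoids
 * stop_machine() and only issues kick_all_cpus_sync().
 */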
267
268 u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
269                                   u32 insn, u64 imm)
270 {
271         u32 immlo, immhi, lomask, himask, mask;
272         int shift;
273
274         switch (type) {
275         case AARCH64_INSN_IMM_ADR:
276                 lomask = 0x3;
277                 himask = 0x7ffff;
278                 immlo = imm & lomask;
279                 imm >>= 2;
280                 immhi = imm & himask;
281                 imm = (immlo << 24) | (immhi);
282                 mask = (lomask << 24) | (himask);
283                 shift = 5;
284                 break;
285         case AARCH64_INSN_IMM_26:
286                 mask = BIT(26) - 1;
287                 shift = 0;
288                 break;
289         case AARCH64_INSN_IMM_19:
290                 mask = BIT(19) - 1;
291                 shift = 5;
292                 break;
293         case AARCH64_INSN_IMM_16:
294                 mask = BIT(16) - 1;
295                 shift = 5;
296                 break;
297         case AARCH64_INSN_IMM_14:
298                 mask = BIT(14) - 1;
299                 shift = 5;
300                 break;
301         case AARCH64_INSN_IMM_12:
302                 mask = BIT(12) - 1;
303                 shift = 10;
304                 break;
305         case AARCH64_INSN_IMM_9:
306                 mask = BIT(9) - 1;
307                 shift = 12;
308                 break;
309         case AARCH64_INSN_IMM_7:
310                 mask = BIT(7) - 1;
311                 shift = 15;
312                 break;
313         case AARCH64_INSN_IMM_6:
314         case AARCH64_INSN_IMM_S:
315                 mask = BIT(6) - 1;
316                 shift = 10;
317                 break;
318         case AARCH64_INSN_IMM_R:
319                 mask = BIT(6) - 1;
320                 shift = 16;
321                 break;
322         default:
323                 pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
324                         type);
325                 return 0;
326         }
327
328         /* Update the immediate field. */
329         insn &= ~(mask << shift);
330         insn |= (imm & mask) << shift;
331
332         return insn;
333 }
334
335 static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
336                                         u32 insn,
337                                         enum aarch64_insn_register reg)
338 {
339         int shift;
340
341         if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
342                 pr_err("%s: unknown register encoding %d\n", __func__, reg);
343                 return 0;
344         }
345
346         switch (type) {
347         case AARCH64_INSN_REGTYPE_RT:
348         case AARCH64_INSN_REGTYPE_RD:
349                 shift = 0;
350                 break;
351         case AARCH64_INSN_REGTYPE_RN:
352                 shift = 5;
353                 break;
354         case AARCH64_INSN_REGTYPE_RT2:
355         case AARCH64_INSN_REGTYPE_RA:
356                 shift = 10;
357                 break;
358         case AARCH64_INSN_REGTYPE_RM:
359                 shift = 16;
360                 break;
361         default:
362                 pr_err("%s: unknown register type encoding %d\n", __func__,
363                        type);
364                 return 0;
365         }
366
367         insn &= ~(GENMASK(4, 0) << shift);
368         insn |= reg << shift;
369
370         return insn;
371 }
372
373 static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
374                                          u32 insn)
375 {
376         u32 size;
377
378         switch (type) {
379         case AARCH64_INSN_SIZE_8:
380                 size = 0;
381                 break;
382         case AARCH64_INSN_SIZE_16:
383                 size = 1;
384                 break;
385         case AARCH64_INSN_SIZE_32:
386                 size = 2;
387                 break;
388         case AARCH64_INSN_SIZE_64:
389                 size = 3;
390                 break;
391         default:
392                 pr_err("%s: unknown size encoding %d\n", __func__, type);
393                 return 0;
394         }
395
396         insn &= ~GENMASK(31, 30);
397         insn |= size << 30;
398
399         return insn;
400 }
401
402 static inline long branch_imm_common(unsigned long pc, unsigned long addr,
403                                      long range)
404 {
405         long offset;
406
407         /*
408          * PC: A 64-bit Program Counter holding the address of the current
409          * instruction. A64 instructions must be word-aligned.
410          */
411         BUG_ON((pc & 0x3) || (addr & 0x3));
412
413         offset = ((long)addr - (long)pc);
414         BUG_ON(offset < -range || offset >= range);
415
416         return offset;
417 }
418
419 u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
420                                           enum aarch64_insn_branch_type type)
421 {
422         u32 insn;
423         long offset;
424
425         /*
426          * B/BL support a [-128M, 128M) offset.
427          * The ARM64 virtual address layout guarantees that all kernel and
428          * module text lies within +/-128M of each other.
429          */
430         offset = branch_imm_common(pc, addr, SZ_128M);
431
432         switch (type) {
433         case AARCH64_INSN_BRANCH_LINK:
434                 insn = aarch64_insn_get_bl_value();
435                 break;
436         case AARCH64_INSN_BRANCH_NOLINK:
437                 insn = aarch64_insn_get_b_value();
438                 break;
439         default:
440                 BUG_ON(1);
441                 return AARCH64_BREAK_FAULT;
442         }
443
444         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
445                                              offset >> 2);
446 }
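
/*
 * Worked example for aarch64_insn_gen_branch_imm() above: assuming pc is
 * word-aligned, the call
 *
 *	insn = aarch64_insn_gen_branch_imm(pc, pc + 0x1000,
 *					   AARCH64_INSN_BRANCH_NOLINK);
 *
 * starts from the B opcode 0x14000000 and places (0x1000 >> 2) = 0x400 in
 * the imm26 field, giving insn == 0x14000400, i.e. "b <pc + 0x1000>".
 */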
447
448 u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
449                                      enum aarch64_insn_register reg,
450                                      enum aarch64_insn_variant variant,
451                                      enum aarch64_insn_branch_type type)
452 {
453         u32 insn;
454         long offset;
455
456         offset = branch_imm_common(pc, addr, SZ_1M);
457
458         switch (type) {
459         case AARCH64_INSN_BRANCH_COMP_ZERO:
460                 insn = aarch64_insn_get_cbz_value();
461                 break;
462         case AARCH64_INSN_BRANCH_COMP_NONZERO:
463                 insn = aarch64_insn_get_cbnz_value();
464                 break;
465         default:
466                 BUG_ON(1);
467                 return AARCH64_BREAK_FAULT;
468         }
469
470         switch (variant) {
471         case AARCH64_INSN_VARIANT_32BIT:
472                 break;
473         case AARCH64_INSN_VARIANT_64BIT:
474                 insn |= AARCH64_INSN_SF_BIT;
475                 break;
476         default:
477                 BUG_ON(1);
478                 return AARCH64_BREAK_FAULT;
479         }
480
481         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
482
483         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
484                                              offset >> 2);
485 }
486
487 u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
488                                      enum aarch64_insn_condition cond)
489 {
490         u32 insn;
491         long offset;
492
493         offset = branch_imm_common(pc, addr, SZ_1M);
494
495         insn = aarch64_insn_get_bcond_value();
496
497         BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
498         insn |= cond;
499
500         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
501                                              offset >> 2);
502 }
503
504 u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
505 {
506         return aarch64_insn_get_hint_value() | op;
507 }
508
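/*
 * With AARCH64_INSN_HINT_NOP being HINT #0, aarch64_insn_gen_nop() below
 * returns the canonical A64 NOP encoding, 0xd503201f.
 */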
509 u32 __kprobes aarch64_insn_gen_nop(void)
510 {
511         return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
512 }
513
514 u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
515                                 enum aarch64_insn_branch_type type)
516 {
517         u32 insn;
518
519         switch (type) {
520         case AARCH64_INSN_BRANCH_NOLINK:
521                 insn = aarch64_insn_get_br_value();
522                 break;
523         case AARCH64_INSN_BRANCH_LINK:
524                 insn = aarch64_insn_get_blr_value();
525                 break;
526         case AARCH64_INSN_BRANCH_RETURN:
527                 insn = aarch64_insn_get_ret_value();
528                 break;
529         default:
530                 BUG_ON(1);
531                 return AARCH64_BREAK_FAULT;
532         }
533
534         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
535 }
536
537 u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
538                                     enum aarch64_insn_register base,
539                                     enum aarch64_insn_register offset,
540                                     enum aarch64_insn_size_type size,
541                                     enum aarch64_insn_ldst_type type)
542 {
543         u32 insn;
544
545         switch (type) {
546         case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
547                 insn = aarch64_insn_get_ldr_reg_value();
548                 break;
549         case AARCH64_INSN_LDST_STORE_REG_OFFSET:
550                 insn = aarch64_insn_get_str_reg_value();
551                 break;
552         default:
553                 BUG_ON(1);
554                 return AARCH64_BREAK_FAULT;
555         }
556
557         insn = aarch64_insn_encode_ldst_size(size, insn);
558
559         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
560
561         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
562                                             base);
563
564         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
565                                             offset);
566 }
567
568 u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
569                                      enum aarch64_insn_register reg2,
570                                      enum aarch64_insn_register base,
571                                      int offset,
572                                      enum aarch64_insn_variant variant,
573                                      enum aarch64_insn_ldst_type type)
574 {
575         u32 insn;
576         int shift;
577
578         switch (type) {
579         case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
580                 insn = aarch64_insn_get_ldp_pre_value();
581                 break;
582         case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
583                 insn = aarch64_insn_get_stp_pre_value();
584                 break;
585         case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
586                 insn = aarch64_insn_get_ldp_post_value();
587                 break;
588         case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
589                 insn = aarch64_insn_get_stp_post_value();
590                 break;
591         default:
592                 BUG_ON(1);
593                 return AARCH64_BREAK_FAULT;
594         }
595
596         switch (variant) {
597         case AARCH64_INSN_VARIANT_32BIT:
598                 /* offset must be a multiple of 4 in the range [-256, 252] */
599                 BUG_ON(offset & 0x3);
600                 BUG_ON(offset < -256 || offset > 252);
601                 shift = 2;
602                 break;
603         case AARCH64_INSN_VARIANT_64BIT:
604                 /* offset must be a multiple of 8 in the range [-512, 504] */
605                 BUG_ON(offset & 0x7);
606                 BUG_ON(offset < -512 || offset > 504);
607                 shift = 3;
608                 insn |= AARCH64_INSN_SF_BIT;
609                 break;
610         default:
611                 BUG_ON(1);
612                 return AARCH64_BREAK_FAULT;
613         }
614
615         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
616                                             reg1);
617
618         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
619                                             reg2);
620
621         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
622                                             base);
623
624         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
625                                              offset >> shift);
626 }
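
/*
 * Illustrative call of aarch64_insn_gen_load_store_pair() above (not taken
 * from an in-tree user), assuming the usual register enumerators from
 * <asm/insn.h>: emitting the frame-record push "stp x29, x30, [sp, #-16]!"
 * would look like
 *
 *	insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_29,
 *						AARCH64_INSN_REG_30,
 *						AARCH64_INSN_REG_SP, -16,
 *						AARCH64_INSN_VARIANT_64BIT,
 *						AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
 *
 * where -16 satisfies the 64-bit constraints above (a multiple of 8 within
 * [-512, 504]) and is stored as -16 >> 3 in the 7-bit immediate.
 */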
627
628 u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
629                                  enum aarch64_insn_register src,
630                                  int imm, enum aarch64_insn_variant variant,
631                                  enum aarch64_insn_adsb_type type)
632 {
633         u32 insn;
634
635         switch (type) {
636         case AARCH64_INSN_ADSB_ADD:
637                 insn = aarch64_insn_get_add_imm_value();
638                 break;
639         case AARCH64_INSN_ADSB_SUB:
640                 insn = aarch64_insn_get_sub_imm_value();
641                 break;
642         case AARCH64_INSN_ADSB_ADD_SETFLAGS:
643                 insn = aarch64_insn_get_adds_imm_value();
644                 break;
645         case AARCH64_INSN_ADSB_SUB_SETFLAGS:
646                 insn = aarch64_insn_get_subs_imm_value();
647                 break;
648         default:
649                 BUG_ON(1);
650                 return AARCH64_BREAK_FAULT;
651         }
652
653         switch (variant) {
654         case AARCH64_INSN_VARIANT_32BIT:
655                 break;
656         case AARCH64_INSN_VARIANT_64BIT:
657                 insn |= AARCH64_INSN_SF_BIT;
658                 break;
659         default:
660                 BUG_ON(1);
661                 return AARCH64_BREAK_FAULT;
662         }
663
664         BUG_ON(imm & ~(SZ_4K - 1));
665
666         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
667
668         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
669
670         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
671 }
672
673 u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
674                               enum aarch64_insn_register src,
675                               int immr, int imms,
676                               enum aarch64_insn_variant variant,
677                               enum aarch64_insn_bitfield_type type)
678 {
679         u32 insn;
680         u32 mask;
681
682         switch (type) {
683         case AARCH64_INSN_BITFIELD_MOVE:
684                 insn = aarch64_insn_get_bfm_value();
685                 break;
686         case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
687                 insn = aarch64_insn_get_ubfm_value();
688                 break;
689         case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
690                 insn = aarch64_insn_get_sbfm_value();
691                 break;
692         default:
693                 BUG_ON(1);
694                 return AARCH64_BREAK_FAULT;
695         }
696
697         switch (variant) {
698         case AARCH64_INSN_VARIANT_32BIT:
699                 mask = GENMASK(4, 0);
700                 break;
701         case AARCH64_INSN_VARIANT_64BIT:
702                 insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
703                 mask = GENMASK(5, 0);
704                 break;
705         default:
706                 BUG_ON(1);
707                 return AARCH64_BREAK_FAULT;
708         }
709
710         BUG_ON(immr & ~mask);
711         BUG_ON(imms & ~mask);
712
713         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
714
715         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
716
717         insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
718
719         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
720 }
721
722 u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
723                               int imm, int shift,
724                               enum aarch64_insn_variant variant,
725                               enum aarch64_insn_movewide_type type)
726 {
727         u32 insn;
728
729         switch (type) {
730         case AARCH64_INSN_MOVEWIDE_ZERO:
731                 insn = aarch64_insn_get_movz_value();
732                 break;
733         case AARCH64_INSN_MOVEWIDE_KEEP:
734                 insn = aarch64_insn_get_movk_value();
735                 break;
736         case AARCH64_INSN_MOVEWIDE_INVERSE:
737                 insn = aarch64_insn_get_movn_value();
738                 break;
739         default:
740                 BUG_ON(1);
741                 return AARCH64_BREAK_FAULT;
742         }
743
744         BUG_ON(imm & ~(SZ_64K - 1));
745
746         switch (variant) {
747         case AARCH64_INSN_VARIANT_32BIT:
748                 BUG_ON(shift != 0 && shift != 16);
749                 break;
750         case AARCH64_INSN_VARIANT_64BIT:
751                 insn |= AARCH64_INSN_SF_BIT;
752                 BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
753                        shift != 48);
754                 break;
755         default:
756                 BUG_ON(1);
757                 return AARCH64_BREAK_FAULT;
758         }
759
760         insn |= (shift >> 4) << 21;
761
762         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
763
764         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
765 }
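
/*
 * Sketch of how a JIT might materialise a 64-bit constant with
 * aarch64_insn_gen_movewide() above ("dst" and "val" are hypothetical): one
 * MOVZ for the low 16 bits followed by MOVKs for the upper halves.  Each
 * call is handed a 16-bit chunk, as required by the SZ_64K check above.
 *
 *	u32 insn[4];
 *	int i;
 *
 *	insn[0] = aarch64_insn_gen_movewide(dst, val & 0xffff, 0,
 *					    AARCH64_INSN_VARIANT_64BIT,
 *					    AARCH64_INSN_MOVEWIDE_ZERO);
 *	for (i = 1; i < 4; i++)
 *		insn[i] = aarch64_insn_gen_movewide(dst,
 *					(val >> (16 * i)) & 0xffff, 16 * i,
 *					AARCH64_INSN_VARIANT_64BIT,
 *					AARCH64_INSN_MOVEWIDE_KEEP);
 */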
766
767 u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
768                                          enum aarch64_insn_register src,
769                                          enum aarch64_insn_register reg,
770                                          int shift,
771                                          enum aarch64_insn_variant variant,
772                                          enum aarch64_insn_adsb_type type)
773 {
774         u32 insn;
775
776         switch (type) {
777         case AARCH64_INSN_ADSB_ADD:
778                 insn = aarch64_insn_get_add_value();
779                 break;
780         case AARCH64_INSN_ADSB_SUB:
781                 insn = aarch64_insn_get_sub_value();
782                 break;
783         case AARCH64_INSN_ADSB_ADD_SETFLAGS:
784                 insn = aarch64_insn_get_adds_value();
785                 break;
786         case AARCH64_INSN_ADSB_SUB_SETFLAGS:
787                 insn = aarch64_insn_get_subs_value();
788                 break;
789         default:
790                 BUG_ON(1);
791                 return AARCH64_BREAK_FAULT;
792         }
793
794         switch (variant) {
795         case AARCH64_INSN_VARIANT_32BIT:
796                 BUG_ON(shift & ~(SZ_32 - 1));
797                 break;
798         case AARCH64_INSN_VARIANT_64BIT:
799                 insn |= AARCH64_INSN_SF_BIT;
800                 BUG_ON(shift & ~(SZ_64 - 1));
801                 break;
802         default:
803                 BUG_ON(1);
804                 return AARCH64_BREAK_FAULT;
805         }
806
807
808         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
809
810         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
811
812         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
813
814         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
815 }
816
817 u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
818                            enum aarch64_insn_register src,
819                            enum aarch64_insn_variant variant,
820                            enum aarch64_insn_data1_type type)
821 {
822         u32 insn;
823
824         switch (type) {
825         case AARCH64_INSN_DATA1_REVERSE_16:
826                 insn = aarch64_insn_get_rev16_value();
827                 break;
828         case AARCH64_INSN_DATA1_REVERSE_32:
829                 insn = aarch64_insn_get_rev32_value();
830                 break;
831         case AARCH64_INSN_DATA1_REVERSE_64:
832                 BUG_ON(variant != AARCH64_INSN_VARIANT_64BIT);
833                 insn = aarch64_insn_get_rev64_value();
834                 break;
835         default:
836                 BUG_ON(1);
837                 return AARCH64_BREAK_FAULT;
838         }
839
840         switch (variant) {
841         case AARCH64_INSN_VARIANT_32BIT:
842                 break;
843         case AARCH64_INSN_VARIANT_64BIT:
844                 insn |= AARCH64_INSN_SF_BIT;
845                 break;
846         default:
847                 BUG_ON(1);
848                 return AARCH64_BREAK_FAULT;
849         }
850
851         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
852
853         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
854 }
855
856 u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
857                            enum aarch64_insn_register src,
858                            enum aarch64_insn_register reg,
859                            enum aarch64_insn_variant variant,
860                            enum aarch64_insn_data2_type type)
861 {
862         u32 insn;
863
864         switch (type) {
865         case AARCH64_INSN_DATA2_UDIV:
866                 insn = aarch64_insn_get_udiv_value();
867                 break;
868         case AARCH64_INSN_DATA2_SDIV:
869                 insn = aarch64_insn_get_sdiv_value();
870                 break;
871         case AARCH64_INSN_DATA2_LSLV:
872                 insn = aarch64_insn_get_lslv_value();
873                 break;
874         case AARCH64_INSN_DATA2_LSRV:
875                 insn = aarch64_insn_get_lsrv_value();
876                 break;
877         case AARCH64_INSN_DATA2_ASRV:
878                 insn = aarch64_insn_get_asrv_value();
879                 break;
880         case AARCH64_INSN_DATA2_RORV:
881                 insn = aarch64_insn_get_rorv_value();
882                 break;
883         default:
884                 BUG_ON(1);
885                 return AARCH64_BREAK_FAULT;
886         }
887
888         switch (variant) {
889         case AARCH64_INSN_VARIANT_32BIT:
890                 break;
891         case AARCH64_INSN_VARIANT_64BIT:
892                 insn |= AARCH64_INSN_SF_BIT;
893                 break;
894         default:
895                 BUG_ON(1);
896                 return AARCH64_BREAK_FAULT;
897         }
898
899         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
900
901         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
902
903         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
904 }
905
906 u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
907                            enum aarch64_insn_register src,
908                            enum aarch64_insn_register reg1,
909                            enum aarch64_insn_register reg2,
910                            enum aarch64_insn_variant variant,
911                            enum aarch64_insn_data3_type type)
912 {
913         u32 insn;
914
915         switch (type) {
916         case AARCH64_INSN_DATA3_MADD:
917                 insn = aarch64_insn_get_madd_value();
918                 break;
919         case AARCH64_INSN_DATA3_MSUB:
920                 insn = aarch64_insn_get_msub_value();
921                 break;
922         default:
923                 BUG_ON(1);
924                 return AARCH64_BREAK_FAULT;
925         }
926
927         switch (variant) {
928         case AARCH64_INSN_VARIANT_32BIT:
929                 break;
930         case AARCH64_INSN_VARIANT_64BIT:
931                 insn |= AARCH64_INSN_SF_BIT;
932                 break;
933         default:
934                 BUG_ON(1);
935                 return AARCH64_BREAK_FAULT;
936         }
937
938         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
939
940         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);
941
942         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
943                                             reg1);
944
945         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
946                                             reg2);
947 }
948
949 u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
950                                          enum aarch64_insn_register src,
951                                          enum aarch64_insn_register reg,
952                                          int shift,
953                                          enum aarch64_insn_variant variant,
954                                          enum aarch64_insn_logic_type type)
955 {
956         u32 insn;
957
958         switch (type) {
959         case AARCH64_INSN_LOGIC_AND:
960                 insn = aarch64_insn_get_and_value();
961                 break;
962         case AARCH64_INSN_LOGIC_BIC:
963                 insn = aarch64_insn_get_bic_value();
964                 break;
965         case AARCH64_INSN_LOGIC_ORR:
966                 insn = aarch64_insn_get_orr_value();
967                 break;
968         case AARCH64_INSN_LOGIC_ORN:
969                 insn = aarch64_insn_get_orn_value();
970                 break;
971         case AARCH64_INSN_LOGIC_EOR:
972                 insn = aarch64_insn_get_eor_value();
973                 break;
974         case AARCH64_INSN_LOGIC_EON:
975                 insn = aarch64_insn_get_eon_value();
976                 break;
977         case AARCH64_INSN_LOGIC_AND_SETFLAGS:
978                 insn = aarch64_insn_get_ands_value();
979                 break;
980         case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
981                 insn = aarch64_insn_get_bics_value();
982                 break;
983         default:
984                 BUG_ON(1);
985                 return AARCH64_BREAK_FAULT;
986         }
987
988         switch (variant) {
989         case AARCH64_INSN_VARIANT_32BIT:
990                 BUG_ON(shift & ~(SZ_32 - 1));
991                 break;
992         case AARCH64_INSN_VARIANT_64BIT:
993                 insn |= AARCH64_INSN_SF_BIT;
994                 BUG_ON(shift & ~(SZ_64 - 1));
995                 break;
996         default:
997                 BUG_ON(1);
998                 return AARCH64_BREAK_FAULT;
999         }
1000
1001
1002         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1003
1004         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1005
1006         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1007
1008         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
1009 }
1010
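/*
 * In the T32 (Thumb-2) instruction set an instruction is 32 bits wide when
 * its first halfword starts with 0b11101, 0b11110 or 0b11111, i.e. when the
 * halfword is >= 0xe800; anything below that is a 16-bit instruction.
 */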
1011 bool aarch32_insn_is_wide(u32 insn)
1012 {
1013         return insn >= 0xe800;
1014 }
1015
1016 /*
1017  * Macros/defines for extracting register numbers from instruction.
1018  */
1019 u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
1020 {
1021         return (insn & (0xf << offset)) >> offset;
1022 }
1023
1024 #define OPC2_MASK       0x7
1025 #define OPC2_OFFSET     5
1026 u32 aarch32_insn_mcr_extract_opc2(u32 insn)
1027 {
1028         return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
1029 }
1030
1031 #define CRM_MASK        0xf
1032 u32 aarch32_insn_mcr_extract_crm(u32 insn)
1033 {
1034         return insn & CRM_MASK;
1035 }