1/*
2 * Copyright (C) 2013 Huawei Ltd.
3 * Author: Jiang Liu <liuj97@gmail.com>
4 *
5 * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
18 */
19#include <linux/bitops.h>
20#include <linux/bug.h>
21#include <linux/compiler.h>
22#include <linux/kernel.h>
23#include <linux/mm.h>
24#include <linux/smp.h>
25#include <linux/spinlock.h>
26#include <linux/stop_machine.h>
27#include <linux/types.h>
28#include <linux/uaccess.h>
29
30#include <asm/cacheflush.h>
31#include <asm/debug-monitors.h>
32#include <asm/fixmap.h>
33#include <asm/insn.h>
34
/*
 * SF (bit 31) selects the 64-bit variant of a data-processing instruction;
 * N (bit 22) is the extra immediate bit used by 64-bit bitfield encodings.
 */
#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
37
/*
 * Top-level A64 decode table, indexed by instruction bits [28:25]
 * (see aarch64_get_insn_class()).
 */
static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,	/* 0b0000 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0b0001 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0b0010 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0b0011 */
	AARCH64_INSN_CLS_LDST,		/* 0b0100 */
	AARCH64_INSN_CLS_DP_REG,	/* 0b0101 */
	AARCH64_INSN_CLS_LDST,		/* 0b0110 */
	AARCH64_INSN_CLS_DP_FPSIMD,	/* 0b0111 */
	AARCH64_INSN_CLS_DP_IMM,	/* 0b1000 */
	AARCH64_INSN_CLS_DP_IMM,	/* 0b1001 */
	AARCH64_INSN_CLS_BR_SYS,	/* 0b1010 */
	AARCH64_INSN_CLS_BR_SYS,	/* 0b1011 */
	AARCH64_INSN_CLS_LDST,		/* 0b1100 */
	AARCH64_INSN_CLS_DP_REG,	/* 0b1101 */
	AARCH64_INSN_CLS_LDST,		/* 0b1110 */
	AARCH64_INSN_CLS_DP_FPSIMD,	/* 0b1111 */
};
56
57enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
58{
59	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
60}
61
62/* NOP is an alias of HINT */
63bool __kprobes aarch64_insn_is_nop(u32 insn)
64{
65	if (!aarch64_insn_is_hint(insn))
66		return false;
67
68	switch (insn & 0xFE0) {
69	case AARCH64_INSN_HINT_YIELD:
70	case AARCH64_INSN_HINT_WFE:
71	case AARCH64_INSN_HINT_WFI:
72	case AARCH64_INSN_HINT_SEV:
73	case AARCH64_INSN_HINT_SEVL:
74		return false;
75	default:
76		return true;
77	}
78}
79
/* Serializes users of the text-poke fixmap slot (patch_map()/patch_unmap()). */
static DEFINE_SPINLOCK(patch_lock);
81
/*
 * Return a writable alias for @addr through the given fixmap slot when the
 * text is mapped read-only, or @addr itself otherwise. Callers serialize
 * access to the fixmap slot via patch_lock.
 */
static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	/* Module text is vmalloc'ed; core kernel text is in the linear map. */
	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
		page = vmalloc_to_page(addr);
	else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
		page = virt_to_page(addr);
	else
		return addr;

	BUG_ON(!page);
	set_fixmap(fixmap, page_to_phys(page));

	/* Preserve the offset within the page through the fixmap alias. */
	return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
}
100
/* Tear down the temporary mapping established by patch_map(). */
static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
105/*
106 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
107 * little-endian.
108 */
109int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
110{
111	int ret;
112	u32 val;
113
114	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
115	if (!ret)
116		*insnp = le32_to_cpu(val);
117
118	return ret;
119}
120
121static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
122{
123	void *waddr = addr;
124	unsigned long flags = 0;
125	int ret;
126
127	spin_lock_irqsave(&patch_lock, flags);
128	waddr = patch_map(addr, FIX_TEXT_POKE0);
129
130	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
131
132	patch_unmap(FIX_TEXT_POKE0);
133	spin_unlock_irqrestore(&patch_lock, flags);
134
135	return ret;
136}
137
138int __kprobes aarch64_insn_write(void *addr, u32 insn)
139{
140	insn = cpu_to_le32(insn);
141	return __aarch64_insn_write(addr, insn);
142}
143
144static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
145{
146	if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
147		return false;
148
149	return	aarch64_insn_is_b(insn) ||
150		aarch64_insn_is_bl(insn) ||
151		aarch64_insn_is_svc(insn) ||
152		aarch64_insn_is_hvc(insn) ||
153		aarch64_insn_is_smc(insn) ||
154		aarch64_insn_is_brk(insn) ||
155		aarch64_insn_is_nop(insn);
156}
157
158/*
159 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
160 * Section B2.6.5 "Concurrent modification and execution of instructions":
161 * Concurrent modification and execution of instructions can lead to the
162 * resulting instruction performing any behavior that can be achieved by
163 * executing any sequence of instructions that can be executed from the
164 * same Exception level, except where the instruction before modification
165 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
166 * or SMC instruction.
167 */
168bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
169{
170	return __aarch64_insn_hotpatch_safe(old_insn) &&
171	       __aarch64_insn_hotpatch_safe(new_insn);
172}
173
174int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
175{
176	u32 *tp = addr;
177	int ret;
178
179	/* A64 instructions must be word aligned */
180	if ((uintptr_t)tp & 0x3)
181		return -EINVAL;
182
183	ret = aarch64_insn_write(tp, insn);
184	if (ret == 0)
185		flush_icache_range((uintptr_t)tp,
186				   (uintptr_t)tp + AARCH64_INSN_SIZE);
187
188	return ret;
189}
190
/* Work description handed to aarch64_insn_patch_text_cb() via stop_machine(). */
struct aarch64_insn_patch {
	void		**text_addrs;	/* addresses to patch */
	u32		*new_insns;	/* replacement instructions */
	int		insn_cnt;	/* entries in both arrays above */
	atomic_t	cpu_count;	/* rendezvous counter for the callback */
};
197
/*
 * stop_machine() callback: the first CPU to arrive applies all patches
 * while the remaining CPUs spin until patching is complete, then execute
 * an ISB to resynchronize their instruction streams.
 */
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		/* Stop at the first instruction that fails to patch. */
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		/*
		 * Wait for the master's second increment: the counter only
		 * exceeds the number of online CPUs once patching is done.
		 */
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}
223
224int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
225{
226	struct aarch64_insn_patch patch = {
227		.text_addrs = addrs,
228		.new_insns = insns,
229		.insn_cnt = cnt,
230		.cpu_count = ATOMIC_INIT(0),
231	};
232
233	if (cnt <= 0)
234		return -EINVAL;
235
236	return stop_machine(aarch64_insn_patch_text_cb, &patch,
237			    cpu_online_mask);
238}
239
/*
 * Patch @cnt instructions: a single hotpatch-safe replacement is applied
 * live (followed by IPI-based synchronization); anything else falls back
 * to the stop_machine() path.
 */
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
267
/*
 * Look up the width (as a low-bit mask) and bit position of immediate
 * field @type within an A64 instruction word. Returns 0 on success,
 * -EINVAL for an unknown immediate type.
 */
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}
321
/*
 * ADR/ADRP split their 21-bit immediate into a 2-bit immlo field at
 * bits [30:29] and a 19-bit immhi field at bits [23:5].
 */
#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5
328
/*
 * Extract immediate field @type from @insn. ADR immediates need their
 * immlo/immhi halves reassembled; everything else is a contiguous field
 * described by aarch64_get_imm_shift_mask(). Returns 0 (and logs an
 * error) for an unknown immediate type.
 */
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		/* Recombine the split immediate: immhi:immlo. */
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}
352
/*
 * Insert @imm into immediate field @type of @insn and return the updated
 * instruction. ADR immediates are split into immlo/immhi; other fields
 * are contiguous. Returns 0 (and logs an error) for an unknown type.
 */
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
				  u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		/* Pre-position the two halves at their final bit offsets. */
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
383
/*
 * Insert register number @reg into the 5-bit register field @type of
 * @insn and return the updated instruction. Returns 0 (and logs an
 * error) for an out-of-range register or unknown field type.
 */
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return 0;
	}

	/* Bit position of each register field within the instruction. */
	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}
421
422static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
423					 u32 insn)
424{
425	u32 size;
426
427	switch (type) {
428	case AARCH64_INSN_SIZE_8:
429		size = 0;
430		break;
431	case AARCH64_INSN_SIZE_16:
432		size = 1;
433		break;
434	case AARCH64_INSN_SIZE_32:
435		size = 2;
436		break;
437	case AARCH64_INSN_SIZE_64:
438		size = 3;
439		break;
440	default:
441		pr_err("%s: unknown size encoding %d\n", __func__, type);
442		return 0;
443	}
444
445	insn &= ~GENMASK(31, 30);
446	insn |= size << 30;
447
448	return insn;
449}
450
/*
 * Compute the branch offset from @pc to @addr, BUGging out on unaligned
 * addresses or an offset outside [-range, range).
 */
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	/*
	 * PC: A 64-bit Program Counter holding the address of the current
	 * instruction. A64 instructions must be word-aligned.
	 */
	BUG_ON((pc | addr) & 0x3);

	offset = (long)addr - (long)pc;
	BUG_ON(offset < -range || offset >= range);

	return offset;
}
467
468u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
469					  enum aarch64_insn_branch_type type)
470{
471	u32 insn;
472	long offset;
473
474	/*
475	 * B/BL support [-128M, 128M) offset
476	 * ARM64 virtual address arrangement guarantees all kernel and module
477	 * texts are within +/-128M.
478	 */
479	offset = branch_imm_common(pc, addr, SZ_128M);
480
481	switch (type) {
482	case AARCH64_INSN_BRANCH_LINK:
483		insn = aarch64_insn_get_bl_value();
484		break;
485	case AARCH64_INSN_BRANCH_NOLINK:
486		insn = aarch64_insn_get_b_value();
487		break;
488	default:
489		BUG_ON(1);
490		return AARCH64_BREAK_FAULT;
491	}
492
493	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
494					     offset >> 2);
495}
496
497u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
498				     enum aarch64_insn_register reg,
499				     enum aarch64_insn_variant variant,
500				     enum aarch64_insn_branch_type type)
501{
502	u32 insn;
503	long offset;
504
505	offset = branch_imm_common(pc, addr, SZ_1M);
506
507	switch (type) {
508	case AARCH64_INSN_BRANCH_COMP_ZERO:
509		insn = aarch64_insn_get_cbz_value();
510		break;
511	case AARCH64_INSN_BRANCH_COMP_NONZERO:
512		insn = aarch64_insn_get_cbnz_value();
513		break;
514	default:
515		BUG_ON(1);
516		return AARCH64_BREAK_FAULT;
517	}
518
519	switch (variant) {
520	case AARCH64_INSN_VARIANT_32BIT:
521		break;
522	case AARCH64_INSN_VARIANT_64BIT:
523		insn |= AARCH64_INSN_SF_BIT;
524		break;
525	default:
526		BUG_ON(1);
527		return AARCH64_BREAK_FAULT;
528	}
529
530	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
531
532	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
533					     offset >> 2);
534}
535
536u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
537				     enum aarch64_insn_condition cond)
538{
539	u32 insn;
540	long offset;
541
542	offset = branch_imm_common(pc, addr, SZ_1M);
543
544	insn = aarch64_insn_get_bcond_value();
545
546	BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
547	insn |= cond;
548
549	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
550					     offset >> 2);
551}
552
553u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
554{
555	return aarch64_insn_get_hint_value() | op;
556}
557
558u32 __kprobes aarch64_insn_gen_nop(void)
559{
560	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
561}
562
563u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
564				enum aarch64_insn_branch_type type)
565{
566	u32 insn;
567
568	switch (type) {
569	case AARCH64_INSN_BRANCH_NOLINK:
570		insn = aarch64_insn_get_br_value();
571		break;
572	case AARCH64_INSN_BRANCH_LINK:
573		insn = aarch64_insn_get_blr_value();
574		break;
575	case AARCH64_INSN_BRANCH_RETURN:
576		insn = aarch64_insn_get_ret_value();
577		break;
578	default:
579		BUG_ON(1);
580		return AARCH64_BREAK_FAULT;
581	}
582
583	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
584}
585
586u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
587				    enum aarch64_insn_register base,
588				    enum aarch64_insn_register offset,
589				    enum aarch64_insn_size_type size,
590				    enum aarch64_insn_ldst_type type)
591{
592	u32 insn;
593
594	switch (type) {
595	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
596		insn = aarch64_insn_get_ldr_reg_value();
597		break;
598	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
599		insn = aarch64_insn_get_str_reg_value();
600		break;
601	default:
602		BUG_ON(1);
603		return AARCH64_BREAK_FAULT;
604	}
605
606	insn = aarch64_insn_encode_ldst_size(size, insn);
607
608	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
609
610	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
611					    base);
612
613	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
614					    offset);
615}
616
/*
 * Generate a pre-/post-indexed LDP/STP of @reg1 and @reg2 based at @base
 * with the given byte @offset. The offset must be a multiple of the
 * register size and within the reach of the scaled imm7 field.
 */
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		/* offset must be multiples of 4 in the range [-256, 252] */
		BUG_ON(offset & 0x3);
		BUG_ON(offset < -256 || offset > 252);
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* offset must be multiples of 8 in the range [-512, 504] */
		BUG_ON(offset & 0x7);
		BUG_ON(offset < -512 || offset > 504);
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	/* imm7 holds the offset scaled down by the access size. */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
676
677u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
678				 enum aarch64_insn_register src,
679				 int imm, enum aarch64_insn_variant variant,
680				 enum aarch64_insn_adsb_type type)
681{
682	u32 insn;
683
684	switch (type) {
685	case AARCH64_INSN_ADSB_ADD:
686		insn = aarch64_insn_get_add_imm_value();
687		break;
688	case AARCH64_INSN_ADSB_SUB:
689		insn = aarch64_insn_get_sub_imm_value();
690		break;
691	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
692		insn = aarch64_insn_get_adds_imm_value();
693		break;
694	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
695		insn = aarch64_insn_get_subs_imm_value();
696		break;
697	default:
698		BUG_ON(1);
699		return AARCH64_BREAK_FAULT;
700	}
701
702	switch (variant) {
703	case AARCH64_INSN_VARIANT_32BIT:
704		break;
705	case AARCH64_INSN_VARIANT_64BIT:
706		insn |= AARCH64_INSN_SF_BIT;
707		break;
708	default:
709		BUG_ON(1);
710		return AARCH64_BREAK_FAULT;
711	}
712
713	BUG_ON(imm & ~(SZ_4K - 1));
714
715	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
716
717	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
718
719	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
720}
721
/*
 * Generate a bitfield move (BFM/UBFM/SBFM) from @src to @dst with the
 * given immr/imms rotate and width immediates. The immediates must fit
 * the variant's field width (5 bits for 32-bit, 6 bits for 64-bit).
 */
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* 64-bit bitfield moves set both SF and N. */
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	BUG_ON(immr & ~mask);
	BUG_ON(imms & ~mask);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
770
/*
 * Generate a move-wide (MOVZ/MOVK/MOVN) of 16-bit immediate @imm into
 * @dst, left-shifted by @shift. @shift must be a multiple of 16 valid
 * for the variant (0/16 for 32-bit; 0/16/32/48 for 64-bit).
 */
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	/* The immediate must fit in the 16-bit field. */
	BUG_ON(imm & ~(SZ_64K - 1));

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift != 0 && shift != 16);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
		       shift != 48);
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	/* hw field (bits [22:21]) holds the shift in units of 16 bits. */
	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
815
816u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
817					 enum aarch64_insn_register src,
818					 enum aarch64_insn_register reg,
819					 int shift,
820					 enum aarch64_insn_variant variant,
821					 enum aarch64_insn_adsb_type type)
822{
823	u32 insn;
824
825	switch (type) {
826	case AARCH64_INSN_ADSB_ADD:
827		insn = aarch64_insn_get_add_value();
828		break;
829	case AARCH64_INSN_ADSB_SUB:
830		insn = aarch64_insn_get_sub_value();
831		break;
832	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
833		insn = aarch64_insn_get_adds_value();
834		break;
835	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
836		insn = aarch64_insn_get_subs_value();
837		break;
838	default:
839		BUG_ON(1);
840		return AARCH64_BREAK_FAULT;
841	}
842
843	switch (variant) {
844	case AARCH64_INSN_VARIANT_32BIT:
845		BUG_ON(shift & ~(SZ_32 - 1));
846		break;
847	case AARCH64_INSN_VARIANT_64BIT:
848		insn |= AARCH64_INSN_SF_BIT;
849		BUG_ON(shift & ~(SZ_64 - 1));
850		break;
851	default:
852		BUG_ON(1);
853		return AARCH64_BREAK_FAULT;
854	}
855
856
857	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
858
859	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
860
861	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
862
863	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
864}
865
866u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
867			   enum aarch64_insn_register src,
868			   enum aarch64_insn_variant variant,
869			   enum aarch64_insn_data1_type type)
870{
871	u32 insn;
872
873	switch (type) {
874	case AARCH64_INSN_DATA1_REVERSE_16:
875		insn = aarch64_insn_get_rev16_value();
876		break;
877	case AARCH64_INSN_DATA1_REVERSE_32:
878		insn = aarch64_insn_get_rev32_value();
879		break;
880	case AARCH64_INSN_DATA1_REVERSE_64:
881		BUG_ON(variant != AARCH64_INSN_VARIANT_64BIT);
882		insn = aarch64_insn_get_rev64_value();
883		break;
884	default:
885		BUG_ON(1);
886		return AARCH64_BREAK_FAULT;
887	}
888
889	switch (variant) {
890	case AARCH64_INSN_VARIANT_32BIT:
891		break;
892	case AARCH64_INSN_VARIANT_64BIT:
893		insn |= AARCH64_INSN_SF_BIT;
894		break;
895	default:
896		BUG_ON(1);
897		return AARCH64_BREAK_FAULT;
898	}
899
900	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
901
902	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
903}
904
/*
 * Generate a two-source data-processing instruction (divide or variable
 * shift): @dst = @src op @reg.
 */
u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}
954
955u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
956			   enum aarch64_insn_register src,
957			   enum aarch64_insn_register reg1,
958			   enum aarch64_insn_register reg2,
959			   enum aarch64_insn_variant variant,
960			   enum aarch64_insn_data3_type type)
961{
962	u32 insn;
963
964	switch (type) {
965	case AARCH64_INSN_DATA3_MADD:
966		insn = aarch64_insn_get_madd_value();
967		break;
968	case AARCH64_INSN_DATA3_MSUB:
969		insn = aarch64_insn_get_msub_value();
970		break;
971	default:
972		BUG_ON(1);
973		return AARCH64_BREAK_FAULT;
974	}
975
976	switch (variant) {
977	case AARCH64_INSN_VARIANT_32BIT:
978		break;
979	case AARCH64_INSN_VARIANT_64BIT:
980		insn |= AARCH64_INSN_SF_BIT;
981		break;
982	default:
983		BUG_ON(1);
984		return AARCH64_BREAK_FAULT;
985	}
986
987	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
988
989	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);
990
991	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
992					    reg1);
993
994	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
995					    reg2);
996}
997
/*
 * Generate a logical (shifted register) instruction:
 * @dst = @src op (@reg << @shift). @shift must be less than the
 * register width of the chosen variant.
 */
u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		/* Shift amount is bounded by the register width. */
		BUG_ON(shift & ~(SZ_32 - 1));
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift & ~(SZ_64 - 1));
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}


	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
1059
1060bool aarch32_insn_is_wide(u32 insn)
1061{
1062	return insn >= 0xe800;
1063}
1064
1065/*
1066 * Macros/defines for extracting register numbers from instruction.
1067 */
1068u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
1069{
1070	return (insn & (0xf << offset)) >> offset;
1071}
1072
1073#define OPC2_MASK	0x7
1074#define OPC2_OFFSET	5
1075u32 aarch32_insn_mcr_extract_opc2(u32 insn)
1076{
1077	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
1078}
1079
1080#define CRM_MASK	0xf
1081u32 aarch32_insn_mcr_extract_crm(u32 insn)
1082{
1083	return insn & CRM_MASK;
1084}
1085