root/arch/arm64/kernel/insn.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. aarch64_get_insn_class
  2. aarch64_insn_is_nop
  3. aarch64_insn_is_branch_imm
  4. patch_map
  5. patch_unmap
  6. aarch64_insn_read
  7. __aarch64_insn_write
  8. aarch64_insn_write
  9. aarch64_insn_uses_literal
  10. aarch64_insn_is_branch
  11. aarch64_insn_patch_text_nosync
  12. aarch64_insn_patch_text_cb
  13. aarch64_insn_patch_text
  14. aarch64_get_imm_shift_mask
  15. aarch64_insn_decode_immediate
  16. aarch64_insn_encode_immediate
  17. aarch64_insn_decode_register
  18. aarch64_insn_encode_register
  19. aarch64_insn_encode_ldst_size
  20. branch_imm_common
  21. aarch64_insn_gen_branch_imm
  22. aarch64_insn_gen_comp_branch_imm
  23. aarch64_insn_gen_cond_branch_imm
  24. aarch64_insn_gen_hint
  25. aarch64_insn_gen_nop
  26. aarch64_insn_gen_branch_reg
  27. aarch64_insn_gen_load_store_reg
  28. aarch64_insn_gen_load_store_pair
  29. aarch64_insn_gen_load_store_ex
  30. aarch64_insn_gen_ldadd
  31. aarch64_insn_gen_stadd
  32. aarch64_insn_encode_prfm_imm
  33. aarch64_insn_gen_prefetch
  34. aarch64_insn_gen_add_sub_imm
  35. aarch64_insn_gen_bitfield
  36. aarch64_insn_gen_movewide
  37. aarch64_insn_gen_add_sub_shifted_reg
  38. aarch64_insn_gen_data1
  39. aarch64_insn_gen_data2
  40. aarch64_insn_gen_data3
  41. aarch64_insn_gen_logical_shifted_reg
  42. aarch64_insn_gen_adr
  43. aarch64_get_branch_offset
  44. aarch64_set_branch_offset
  45. aarch64_insn_adrp_get_offset
  46. aarch64_insn_adrp_set_offset
  47. aarch64_insn_extract_system_reg
  48. aarch32_insn_is_wide
  49. aarch32_insn_extract_reg_num
  50. aarch32_insn_mcr_extract_opc2
  51. aarch32_insn_mcr_extract_crm
  52. __check_eq
  53. __check_ne
  54. __check_cs
  55. __check_cc
  56. __check_mi
  57. __check_pl
  58. __check_vs
  59. __check_vc
  60. __check_hi
  61. __check_ls
  62. __check_ge
  63. __check_lt
  64. __check_gt
  65. __check_le
  66. __check_al
  67. range_of_ones
  68. aarch64_encode_immediate
  69. aarch64_insn_gen_logical_immediate
  70. aarch64_insn_gen_extr

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Copyright (C) 2013 Huawei Ltd.
   4  * Author: Jiang Liu <liuj97@gmail.com>
   5  *
   6  * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
   7  */
   8 #include <linux/bitops.h>
   9 #include <linux/bug.h>
  10 #include <linux/compiler.h>
  11 #include <linux/kernel.h>
  12 #include <linux/mm.h>
  13 #include <linux/smp.h>
  14 #include <linux/spinlock.h>
  15 #include <linux/stop_machine.h>
  16 #include <linux/types.h>
  17 #include <linux/uaccess.h>
  18 
  19 #include <asm/cacheflush.h>
  20 #include <asm/debug-monitors.h>
  21 #include <asm/fixmap.h>
  22 #include <asm/insn.h>
  23 #include <asm/kprobes.h>
  24 
  25 #define AARCH64_INSN_SF_BIT     BIT(31)
  26 #define AARCH64_INSN_N_BIT      BIT(22)
  27 #define AARCH64_INSN_LSL_12     BIT(22)
  28 
/*
 * Top-level A64 decode table: instruction bits [28:25] select one of
 * these sixteen encoding classes (indexed by (insn >> 25) & 0xf in
 * aarch64_get_insn_class()).
 */
static const int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,	/* 0x0 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0x1 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0x2 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0x3 */
	AARCH64_INSN_CLS_LDST,		/* 0x4 */
	AARCH64_INSN_CLS_DP_REG,	/* 0x5 */
	AARCH64_INSN_CLS_LDST,		/* 0x6 */
	AARCH64_INSN_CLS_DP_FPSIMD,	/* 0x7 */
	AARCH64_INSN_CLS_DP_IMM,	/* 0x8 */
	AARCH64_INSN_CLS_DP_IMM,	/* 0x9 */
	AARCH64_INSN_CLS_BR_SYS,	/* 0xa */
	AARCH64_INSN_CLS_BR_SYS,	/* 0xb */
	AARCH64_INSN_CLS_LDST,		/* 0xc */
	AARCH64_INSN_CLS_DP_REG,	/* 0xd */
	AARCH64_INSN_CLS_LDST,		/* 0xe */
	AARCH64_INSN_CLS_DP_FPSIMD,	/* 0xf */
};
  47 
  48 enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
  49 {
  50         return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
  51 }
  52 
  53 /* NOP is an alias of HINT */
  54 bool __kprobes aarch64_insn_is_nop(u32 insn)
  55 {
  56         if (!aarch64_insn_is_hint(insn))
  57                 return false;
  58 
  59         switch (insn & 0xFE0) {
  60         case AARCH64_INSN_HINT_YIELD:
  61         case AARCH64_INSN_HINT_WFE:
  62         case AARCH64_INSN_HINT_WFI:
  63         case AARCH64_INSN_HINT_SEV:
  64         case AARCH64_INSN_HINT_SEVL:
  65                 return false;
  66         default:
  67                 return true;
  68         }
  69 }
  70 
  71 bool aarch64_insn_is_branch_imm(u32 insn)
  72 {
  73         return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
  74                 aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
  75                 aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
  76                 aarch64_insn_is_bcond(insn));
  77 }
  78 
  79 static DEFINE_RAW_SPINLOCK(patch_lock);
  80 
/*
 * Return a writable alias of @addr through the @fixmap slot, so that
 * instructions can be patched even when the text mapping is read-only.
 *
 * Core kernel text is aliased via its linear-map page; module text
 * (vmalloc'ed) via vmalloc_to_page(), but only when
 * CONFIG_STRICT_MODULE_RWX is enabled - otherwise module text is
 * writable in place and @addr is returned unchanged.
 *
 * Callers hold patch_lock around patch_map()/patch_unmap() (see
 * __aarch64_insn_write()).
 */
static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else if (!module)
		page = phys_to_page(__pa_symbol(addr));
	else
		return addr;

	BUG_ON(!page);
	/* Keep the sub-page offset of @addr within the fixmap alias. */
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}
  98 
/* Tear down the writable alias installed by patch_map(). */
static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
 103 /*
 104  * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 105  * little-endian.
 106  */
 107 int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
 108 {
 109         int ret;
 110         __le32 val;
 111 
 112         ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
 113         if (!ret)
 114                 *insnp = le32_to_cpu(val);
 115 
 116         return ret;
 117 }
 118 
/*
 * Store one already-little-endian instruction at @addr via the
 * FIX_TEXT_POKE0 writable alias, under patch_lock so only one writer
 * uses the fixmap slot at a time.  Returns 0 or the
 * probe_kernel_write() error.
 */
static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}
 135 
/* Write @insn (CPU-endian) at @addr; A64 instructions are always LE. */
int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	return __aarch64_insn_write(addr, cpu_to_le32(insn));
}
 140 
 141 bool __kprobes aarch64_insn_uses_literal(u32 insn)
 142 {
 143         /* ldr/ldrsw (literal), prfm */
 144 
 145         return aarch64_insn_is_ldr_lit(insn) ||
 146                 aarch64_insn_is_ldrsw_lit(insn) ||
 147                 aarch64_insn_is_adr_adrp(insn) ||
 148                 aarch64_insn_is_prfm_lit(insn);
 149 }
 150 
 151 bool __kprobes aarch64_insn_is_branch(u32 insn)
 152 {
 153         /* b, bl, cb*, tb*, b.cond, br, blr */
 154 
 155         return aarch64_insn_is_b(insn) ||
 156                 aarch64_insn_is_bl(insn) ||
 157                 aarch64_insn_is_cbz(insn) ||
 158                 aarch64_insn_is_cbnz(insn) ||
 159                 aarch64_insn_is_tbz(insn) ||
 160                 aarch64_insn_is_tbnz(insn) ||
 161                 aarch64_insn_is_ret(insn) ||
 162                 aarch64_insn_is_br(insn) ||
 163                 aarch64_insn_is_blr(insn) ||
 164                 aarch64_insn_is_bcond(insn);
 165 }
 166 
 167 int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
 168 {
 169         u32 *tp = addr;
 170         int ret;
 171 
 172         /* A64 instructions must be word aligned */
 173         if ((uintptr_t)tp & 0x3)
 174                 return -EINVAL;
 175 
 176         ret = aarch64_insn_write(tp, insn);
 177         if (ret == 0)
 178                 __flush_icache_range((uintptr_t)tp,
 179                                      (uintptr_t)tp + AARCH64_INSN_SIZE);
 180 
 181         return ret;
 182 }
 183 
/*
 * Argument block for aarch64_insn_patch_text_cb(), run on every CPU
 * under stop_machine() by aarch64_insn_patch_text().
 */
struct aarch64_insn_patch {
	void		**text_addrs;	/* sites to patch */
	u32		*new_insns;	/* one replacement insn per site */
	int		insn_cnt;	/* entries in the two arrays above */
	atomic_t	cpu_count;	/* rendezvous counter; see the cb */
};
 190 
/*
 * stop_machine() callback: the first CPU to increment cpu_count does
 * all the patching, while every other CPU spins until the master
 * signals completion with a second increment, then issues an isb() so
 * it refetches the updated instructions.
 */
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		/* Stop patching on the first failure. */
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		/*
		 * The master's extra increment pushes the count past the
		 * number of online CPUs; wait for that before continuing.
		 */
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}
 211 
 212 int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
 213 {
 214         struct aarch64_insn_patch patch = {
 215                 .text_addrs = addrs,
 216                 .new_insns = insns,
 217                 .insn_cnt = cnt,
 218                 .cpu_count = ATOMIC_INIT(0),
 219         };
 220 
 221         if (cnt <= 0)
 222                 return -EINVAL;
 223 
 224         return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
 225                                        cpu_online_mask);
 226 }
 227 
 228 static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
 229                                                 u32 *maskp, int *shiftp)
 230 {
 231         u32 mask;
 232         int shift;
 233 
 234         switch (type) {
 235         case AARCH64_INSN_IMM_26:
 236                 mask = BIT(26) - 1;
 237                 shift = 0;
 238                 break;
 239         case AARCH64_INSN_IMM_19:
 240                 mask = BIT(19) - 1;
 241                 shift = 5;
 242                 break;
 243         case AARCH64_INSN_IMM_16:
 244                 mask = BIT(16) - 1;
 245                 shift = 5;
 246                 break;
 247         case AARCH64_INSN_IMM_14:
 248                 mask = BIT(14) - 1;
 249                 shift = 5;
 250                 break;
 251         case AARCH64_INSN_IMM_12:
 252                 mask = BIT(12) - 1;
 253                 shift = 10;
 254                 break;
 255         case AARCH64_INSN_IMM_9:
 256                 mask = BIT(9) - 1;
 257                 shift = 12;
 258                 break;
 259         case AARCH64_INSN_IMM_7:
 260                 mask = BIT(7) - 1;
 261                 shift = 15;
 262                 break;
 263         case AARCH64_INSN_IMM_6:
 264         case AARCH64_INSN_IMM_S:
 265                 mask = BIT(6) - 1;
 266                 shift = 10;
 267                 break;
 268         case AARCH64_INSN_IMM_R:
 269                 mask = BIT(6) - 1;
 270                 shift = 16;
 271                 break;
 272         case AARCH64_INSN_IMM_N:
 273                 mask = 1;
 274                 shift = 22;
 275                 break;
 276         default:
 277                 return -EINVAL;
 278         }
 279 
 280         *maskp = mask;
 281         *shiftp = shift;
 282 
 283         return 0;
 284 }
 285 
/*
 * ADR/ADRP split their 21-bit immediate into immlo (2 bits, insn
 * [30:29]) and immhi (19 bits, insn [23:5]); ADR_IMM_SIZE is the total
 * span of the reassembled immediate.
 */
#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5
 292 
/*
 * Extract the immediate field of type @type from @insn.
 *
 * AARCH64_INSN_IMM_ADR is special-cased: its immediate is split into
 * immlo/immhi halves, which are stitched back together here.  All
 * other types are contiguous fields described by
 * aarch64_get_imm_shift_mask().
 *
 * Returns the zero-extended immediate, or 0 for an unknown type.
 */
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		/* Reassemble: immhi:immlo. */
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}
 316 
/*
 * Insert immediate @imm into the field of type @type in @insn.
 *
 * An incoming AARCH64_BREAK_FAULT is passed straight through so
 * encoder calls can be chained; an unknown @type also yields
 * AARCH64_BREAK_FAULT.  For AARCH64_INSN_IMM_ADR the immediate is
 * split into the separate immlo/immhi fields first.
 */
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
				  u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		/* Split imm into the immlo and immhi field positions. */
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
 350 
 351 u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
 352                                         u32 insn)
 353 {
 354         int shift;
 355 
 356         switch (type) {
 357         case AARCH64_INSN_REGTYPE_RT:
 358         case AARCH64_INSN_REGTYPE_RD:
 359                 shift = 0;
 360                 break;
 361         case AARCH64_INSN_REGTYPE_RN:
 362                 shift = 5;
 363                 break;
 364         case AARCH64_INSN_REGTYPE_RT2:
 365         case AARCH64_INSN_REGTYPE_RA:
 366                 shift = 10;
 367                 break;
 368         case AARCH64_INSN_REGTYPE_RM:
 369                 shift = 16;
 370                 break;
 371         default:
 372                 pr_err("%s: unknown register type encoding %d\n", __func__,
 373                        type);
 374                 return 0;
 375         }
 376 
 377         return (insn >> shift) & GENMASK(4, 0);
 378 }
 379 
/*
 * Insert register @reg into the 5-bit field @type of @insn.
 *
 * @reg must lie in [AARCH64_INSN_REG_0, AARCH64_INSN_REG_SP].  Errors,
 * and an incoming AARCH64_BREAK_FAULT from an earlier encoding step,
 * propagate as AARCH64_BREAK_FAULT so calls can be chained.
 */
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
	case AARCH64_INSN_REGTYPE_RS:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}
 421 
 422 static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
 423                                          u32 insn)
 424 {
 425         u32 size;
 426 
 427         switch (type) {
 428         case AARCH64_INSN_SIZE_8:
 429                 size = 0;
 430                 break;
 431         case AARCH64_INSN_SIZE_16:
 432                 size = 1;
 433                 break;
 434         case AARCH64_INSN_SIZE_32:
 435                 size = 2;
 436                 break;
 437         case AARCH64_INSN_SIZE_64:
 438                 size = 3;
 439                 break;
 440         default:
 441                 pr_err("%s: unknown size encoding %d\n", __func__, type);
 442                 return AARCH64_BREAK_FAULT;
 443         }
 444 
 445         insn &= ~GENMASK(31, 30);
 446         insn |= size << 30;
 447 
 448         return insn;
 449 }
 450 
/*
 * Compute the branch offset from @pc to @addr and validate it against
 * [-range, range).  On misalignment or an out-of-range offset this
 * logs an error and returns @range itself as an out-of-band sentinel;
 * callers must treat a return value >= @range as failure.
 */
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}
 470 
 471 u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
 472                                           enum aarch64_insn_branch_type type)
 473 {
 474         u32 insn;
 475         long offset;
 476 
 477         /*
 478          * B/BL support [-128M, 128M) offset
 479          * ARM64 virtual address arrangement guarantees all kernel and module
 480          * texts are within +/-128M.
 481          */
 482         offset = branch_imm_common(pc, addr, SZ_128M);
 483         if (offset >= SZ_128M)
 484                 return AARCH64_BREAK_FAULT;
 485 
 486         switch (type) {
 487         case AARCH64_INSN_BRANCH_LINK:
 488                 insn = aarch64_insn_get_bl_value();
 489                 break;
 490         case AARCH64_INSN_BRANCH_NOLINK:
 491                 insn = aarch64_insn_get_b_value();
 492                 break;
 493         default:
 494                 pr_err("%s: unknown branch encoding %d\n", __func__, type);
 495                 return AARCH64_BREAK_FAULT;
 496         }
 497 
 498         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
 499                                              offset >> 2);
 500 }
 501 
 502 u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
 503                                      enum aarch64_insn_register reg,
 504                                      enum aarch64_insn_variant variant,
 505                                      enum aarch64_insn_branch_type type)
 506 {
 507         u32 insn;
 508         long offset;
 509 
 510         offset = branch_imm_common(pc, addr, SZ_1M);
 511         if (offset >= SZ_1M)
 512                 return AARCH64_BREAK_FAULT;
 513 
 514         switch (type) {
 515         case AARCH64_INSN_BRANCH_COMP_ZERO:
 516                 insn = aarch64_insn_get_cbz_value();
 517                 break;
 518         case AARCH64_INSN_BRANCH_COMP_NONZERO:
 519                 insn = aarch64_insn_get_cbnz_value();
 520                 break;
 521         default:
 522                 pr_err("%s: unknown branch encoding %d\n", __func__, type);
 523                 return AARCH64_BREAK_FAULT;
 524         }
 525 
 526         switch (variant) {
 527         case AARCH64_INSN_VARIANT_32BIT:
 528                 break;
 529         case AARCH64_INSN_VARIANT_64BIT:
 530                 insn |= AARCH64_INSN_SF_BIT;
 531                 break;
 532         default:
 533                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
 534                 return AARCH64_BREAK_FAULT;
 535         }
 536 
 537         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
 538 
 539         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
 540                                              offset >> 2);
 541 }
 542 
 543 u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
 544                                      enum aarch64_insn_condition cond)
 545 {
 546         u32 insn;
 547         long offset;
 548 
 549         offset = branch_imm_common(pc, addr, SZ_1M);
 550 
 551         insn = aarch64_insn_get_bcond_value();
 552 
 553         if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
 554                 pr_err("%s: unknown condition encoding %d\n", __func__, cond);
 555                 return AARCH64_BREAK_FAULT;
 556         }
 557         insn |= cond;
 558 
 559         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
 560                                              offset >> 2);
 561 }
 562 
/* Build a HINT instruction; @op is OR'ed in as the hint operand field. */
u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}
 567 
/* NOP is encoded as HINT with the NOP operand. */
u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
 572 
 573 u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
 574                                 enum aarch64_insn_branch_type type)
 575 {
 576         u32 insn;
 577 
 578         switch (type) {
 579         case AARCH64_INSN_BRANCH_NOLINK:
 580                 insn = aarch64_insn_get_br_value();
 581                 break;
 582         case AARCH64_INSN_BRANCH_LINK:
 583                 insn = aarch64_insn_get_blr_value();
 584                 break;
 585         case AARCH64_INSN_BRANCH_RETURN:
 586                 insn = aarch64_insn_get_ret_value();
 587                 break;
 588         default:
 589                 pr_err("%s: unknown branch encoding %d\n", __func__, type);
 590                 return AARCH64_BREAK_FAULT;
 591         }
 592 
 593         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
 594 }
 595 
 596 u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
 597                                     enum aarch64_insn_register base,
 598                                     enum aarch64_insn_register offset,
 599                                     enum aarch64_insn_size_type size,
 600                                     enum aarch64_insn_ldst_type type)
 601 {
 602         u32 insn;
 603 
 604         switch (type) {
 605         case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
 606                 insn = aarch64_insn_get_ldr_reg_value();
 607                 break;
 608         case AARCH64_INSN_LDST_STORE_REG_OFFSET:
 609                 insn = aarch64_insn_get_str_reg_value();
 610                 break;
 611         default:
 612                 pr_err("%s: unknown load/store encoding %d\n", __func__, type);
 613                 return AARCH64_BREAK_FAULT;
 614         }
 615 
 616         insn = aarch64_insn_encode_ldst_size(size, insn);
 617 
 618         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
 619 
 620         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
 621                                             base);
 622 
 623         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
 624                                             offset);
 625 }
 626 
/*
 * Generate LDP/STP with pre- or post-index writeback.
 *
 * @offset is a byte offset; the imm7 field is scaled by the access
 * size, so it must be a multiple of 4 in [-256, 252] for the 32-bit
 * variant, or a multiple of 8 in [-512, 504] for the 64-bit variant.
 */
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;	/* imm7 counts 4-byte units */
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;	/* imm7 counts 8-byte units */
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
 690 
/*
 * Generate a load-exclusive/store-exclusive instruction.  @state (Rs)
 * receives the store-exclusive status result; Rt2 is fixed to the zero
 * register since only single-register forms are generated here.
 */
u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
				   enum aarch64_insn_register base,
				   enum aarch64_insn_register state,
				   enum aarch64_insn_size_type size,
				   enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_EX:
		insn = aarch64_insn_get_load_ex_value();
		break;
	case AARCH64_INSN_LDST_STORE_EX:
		insn = aarch64_insn_get_store_ex_value();
		break;
	default:
		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	/* Rt2 is unused in the single-register forms: set it to ZR. */
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    AARCH64_INSN_REG_ZR);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    state);
}
 725 
 726 u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
 727                            enum aarch64_insn_register address,
 728                            enum aarch64_insn_register value,
 729                            enum aarch64_insn_size_type size)
 730 {
 731         u32 insn = aarch64_insn_get_ldadd_value();
 732 
 733         switch (size) {
 734         case AARCH64_INSN_SIZE_32:
 735         case AARCH64_INSN_SIZE_64:
 736                 break;
 737         default:
 738                 pr_err("%s: unimplemented size encoding %d\n", __func__, size);
 739                 return AARCH64_BREAK_FAULT;
 740         }
 741 
 742         insn = aarch64_insn_encode_ldst_size(size, insn);
 743 
 744         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
 745                                             result);
 746 
 747         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
 748                                             address);
 749 
 750         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
 751                                             value);
 752 }
 753 
/* Generate STADD, the no-result form of LDADD. */
u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size)
{
	/*
	 * STADD is simply encoded as an alias for LDADD with XZR as
	 * the destination register.
	 */
	return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
				      value, size);
}
 765 
/*
 * Encode the PRFM prfop operand into @insn.  The 5-bit operand lives
 * in the Rt field and is laid out as type[4:3] : target[2:1] :
 * policy[0].
 */
static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
					enum aarch64_insn_prfm_target target,
					enum aarch64_insn_prfm_policy policy,
					u32 insn)
{
	u32 imm_type = 0, imm_target = 0, imm_policy = 0;

	switch (type) {
	case AARCH64_INSN_PRFM_TYPE_PLD:
		break;
	case AARCH64_INSN_PRFM_TYPE_PLI:
		imm_type = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TYPE_PST:
		imm_type = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (target) {
	case AARCH64_INSN_PRFM_TARGET_L1:
		break;
	case AARCH64_INSN_PRFM_TARGET_L2:
		imm_target = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TARGET_L3:
		imm_target = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
		return AARCH64_BREAK_FAULT;
	}

	switch (policy) {
	case AARCH64_INSN_PRFM_POLICY_KEEP:
		break;
	case AARCH64_INSN_PRFM_POLICY_STRM:
		imm_policy = BIT(0);
		break;
	default:
		pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
		return AARCH64_BREAK_FAULT;
	}

	/* In this case, imm5 is encoded into Rt field. */
	insn &= ~GENMASK(4, 0);
	insn |= imm_policy | (imm_target << 1) | (imm_type << 3);

	return insn;
}
 818 
/*
 * Generate a PRFM (immediate) instruction prefetching from [base]
 * with the given prefetch operand (type/target/policy) and a zero
 * immediate offset.
 */
u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
			      enum aarch64_insn_prfm_type type,
			      enum aarch64_insn_prfm_target target,
			      enum aarch64_insn_prfm_policy policy)
{
	u32 insn = aarch64_insn_get_prfm_value();

	/* PRFM uses the 64-bit size encoding in the load/store size field */
	insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);

	insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	/* Always emitted with a zero immediate offset */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
}
 835 
/*
 * Generate an ADD/SUB (immediate) instruction: dst = src +/- imm.
 * imm must be a 12-bit value, or a 12-bit value shifted left by 12
 * (i.e. the low 12 bits of a 24-bit immediate must be clear).
 * Returns AARCH64_BREAK_FAULT on invalid input.
 */
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* We can't encode more than a 24bit value (12bit + 12bit shift) */
	if (imm & ~(BIT(24) - 1))
		goto out;

	/* If we have something in the top 12 bits... */
	if (imm & ~(SZ_4K - 1)) {
		/* ... and in the low 12 bits -> error */
		if (imm & (SZ_4K - 1))
			goto out;

		/* Use the "LSL #12" form to reach the upper 12 bits */
		imm >>= 12;
		insn |= AARCH64_INSN_LSL_12;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);

out:
	pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
	return AARCH64_BREAK_FAULT;
}
 896 
/*
 * Generate a bitfield move instruction (BFM/UBFM/SBFM).
 * immr and imms are the raw rotate/width fields of the encoding; they
 * must fit in 5 bits (32-bit variant) or 6 bits (64-bit variant).
 */
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* 64-bit operands need both sf and N set in the encoding */
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
 951 
/*
 * Generate a MOVZ/MOVK/MOVN instruction moving the 16-bit immediate
 * imm into dst at the 16-bit-aligned bit position given by shift
 * (0/16 for the 32-bit variant, additionally 32/48 for 64-bit).
 */
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	/* The immediate itself is limited to 16 bits */
	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* The hw field (bits 22:21) selects the 16-bit lane: shift / 16 */
	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
1006 
/*
 * Generate an ADD/SUB (shifted register) instruction:
 * dst = src +/- (reg shifted by 'shift').  The shift amount goes into
 * imm6 and must be below the register width; the shift type is
 * whatever the base opcode value supplies.
 */
u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}


	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
1064 
/*
 * Generate a one-source data-processing instruction
 * (REV16/REV32/REV64): dst = byte-reversal variant of src.
 * REV64 is only valid for the 64-bit variant.
 */
u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			       __func__, variant);
			return AARCH64_BREAK_FAULT;
		}
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}
1107 
/*
 * Generate a two-source data-processing instruction
 * (UDIV/SDIV/LSLV/LSRV/ASRV/RORV): dst = src <op> reg.
 */
u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}
1157 
/*
 * Generate a three-source data-processing instruction (MADD/MSUB):
 * dst = src +/- (reg1 * reg2), with src encoded in the Ra
 * (accumulator) field.
 */
u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}
1200 
/*
 * Generate a logical (shifted register) instruction:
 * dst = src <op> (reg shifted by 'shift').  The shift amount goes
 * into imm6 and must be below the register width; the shift type is
 * whatever the base opcode value supplies.
 */
u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}


	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
1270 
1271 u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
1272                          enum aarch64_insn_register reg,
1273                          enum aarch64_insn_adr_type type)
1274 {
1275         u32 insn;
1276         s32 offset;
1277 
1278         switch (type) {
1279         case AARCH64_INSN_ADR_TYPE_ADR:
1280                 insn = aarch64_insn_get_adr_value();
1281                 offset = addr - pc;
1282                 break;
1283         case AARCH64_INSN_ADR_TYPE_ADRP:
1284                 insn = aarch64_insn_get_adrp_value();
1285                 offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;
1286                 break;
1287         default:
1288                 pr_err("%s: unknown adr encoding %d\n", __func__, type);
1289                 return AARCH64_BREAK_FAULT;
1290         }
1291 
1292         if (offset < -SZ_1M || offset >= SZ_1M)
1293                 return AARCH64_BREAK_FAULT;
1294 
1295         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg);
1296 
1297         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset);
1298 }
1299 
1300 /*
1301  * Decode the imm field of a branch, and return the byte offset as a
1302  * signed value (so it can be used when computing a new branch
1303  * target).
1304  */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		/*
		 * Sign-extend the 26-bit field (<< 6 puts bit 25 in the
		 * sign bit) and scale to bytes (net right shift of 4
		 * instead of 6 multiplies by 4).  Relies on arithmetic
		 * right shift of s32.
		 */
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		/* Sign-extend the 19-bit field and scale to bytes */
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		/* Sign-extend the 14-bit field and scale to bytes */
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}
1328 
1329 /*
1330  * Encode the displacement of a branch in the imm field and return the
1331  * updated instruction.
1332  */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	/* offset is a byte displacement; all branch imm fields are in words */
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}
1351 
s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	/* The ADRP immediate counts 4K pages; convert to a byte offset */
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}
1357 
u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	/* Convert the byte offset to 4K pages; the low 12 bits are dropped */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
						offset >> 12);
}
1364 
/*
 * Extract the Op/CR data from a msr/mrs instruction: bits [20:5],
 * i.e. the o0:op1:CRn:CRm:op2 system register specifier.
 */
u32 aarch64_insn_extract_system_reg(u32 insn)
{
	return (insn & 0x1FFFE0) >> 5;
}
1372 
bool aarch32_insn_is_wide(u32 insn)
{
	/*
	 * In Thumb-2, a first halfword of 0b111xx with xx != 00
	 * (i.e. >= 0xe800) introduces a 32-bit instruction.
	 */
	return insn >= 0xe800;
}
1377 
1378 /*
1379  * Macros/defines for extracting register numbers from instruction.
1380  */
1381 u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
1382 {
1383         return (insn & (0xf << offset)) >> offset;
1384 }
1385 
1386 #define OPC2_MASK       0x7
1387 #define OPC2_OFFSET     5
1388 u32 aarch32_insn_mcr_extract_opc2(u32 insn)
1389 {
1390         return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
1391 }
1392 
1393 #define CRM_MASK        0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	/* CRm lives in bits [3:0] of an MCR instruction */
	return insn & CRM_MASK;
}
1398 
1399 static bool __kprobes __check_eq(unsigned long pstate)
1400 {
1401         return (pstate & PSR_Z_BIT) != 0;
1402 }
1403 
1404 static bool __kprobes __check_ne(unsigned long pstate)
1405 {
1406         return (pstate & PSR_Z_BIT) == 0;
1407 }
1408 
1409 static bool __kprobes __check_cs(unsigned long pstate)
1410 {
1411         return (pstate & PSR_C_BIT) != 0;
1412 }
1413 
1414 static bool __kprobes __check_cc(unsigned long pstate)
1415 {
1416         return (pstate & PSR_C_BIT) == 0;
1417 }
1418 
1419 static bool __kprobes __check_mi(unsigned long pstate)
1420 {
1421         return (pstate & PSR_N_BIT) != 0;
1422 }
1423 
1424 static bool __kprobes __check_pl(unsigned long pstate)
1425 {
1426         return (pstate & PSR_N_BIT) == 0;
1427 }
1428 
1429 static bool __kprobes __check_vs(unsigned long pstate)
1430 {
1431         return (pstate & PSR_V_BIT) != 0;
1432 }
1433 
1434 static bool __kprobes __check_vc(unsigned long pstate)
1435 {
1436         return (pstate & PSR_V_BIT) == 0;
1437 }
1438 
1439 static bool __kprobes __check_hi(unsigned long pstate)
1440 {
1441         pstate &= ~(pstate >> 1);       /* PSR_C_BIT &= ~PSR_Z_BIT */
1442         return (pstate & PSR_C_BIT) != 0;
1443 }
1444 
1445 static bool __kprobes __check_ls(unsigned long pstate)
1446 {
1447         pstate &= ~(pstate >> 1);       /* PSR_C_BIT &= ~PSR_Z_BIT */
1448         return (pstate & PSR_C_BIT) == 0;
1449 }
1450 
1451 static bool __kprobes __check_ge(unsigned long pstate)
1452 {
1453         pstate ^= (pstate << 3);        /* PSR_N_BIT ^= PSR_V_BIT */
1454         return (pstate & PSR_N_BIT) == 0;
1455 }
1456 
1457 static bool __kprobes __check_lt(unsigned long pstate)
1458 {
1459         pstate ^= (pstate << 3);        /* PSR_N_BIT ^= PSR_V_BIT */
1460         return (pstate & PSR_N_BIT) != 0;
1461 }
1462 
1463 static bool __kprobes __check_gt(unsigned long pstate)
1464 {
1465         /*PSR_N_BIT ^= PSR_V_BIT */
1466         unsigned long temp = pstate ^ (pstate << 3);
1467 
1468         temp |= (pstate << 1);  /*PSR_N_BIT |= PSR_Z_BIT */
1469         return (temp & PSR_N_BIT) == 0;
1470 }
1471 
1472 static bool __kprobes __check_le(unsigned long pstate)
1473 {
1474         /*PSR_N_BIT ^= PSR_V_BIT */
1475         unsigned long temp = pstate ^ (pstate << 3);
1476 
1477         temp |= (pstate << 1);  /*PSR_N_BIT |= PSR_Z_BIT */
1478         return (temp & PSR_N_BIT) != 0;
1479 }
1480 
/* AL: always true; pstate is ignored */
static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}
1485 
/*
 * PSTATE checkers indexed by the 4-bit AArch32 condition code.
 *
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};
1496 
/* True if val is a single contiguous run of ones (e.g. 0b000111000). */
static bool range_of_ones(u64 val)
{
	/* Doesn't handle full ones or full zeroes */
	u64 sval = val >> __ffs64(val);	/* __ffs64(0) is undefined: val must be non-zero */

	/* One of Sean Eron Anderson's bithack tricks */
	return ((sval + 1) & (sval)) == 0;
}
1505 
/*
 * Encode imm as an AArch64 "bitmask immediate" (the N:immr:imms
 * fields of logical-immediate instructions): a size-esz element,
 * replicated to fill the register, containing a rotated contiguous
 * run of ones.  Returns AARCH64_BREAK_FAULT if imm cannot be
 * represented that way.
 */
static u32 aarch64_encode_immediate(u64 imm,
				    enum aarch64_insn_variant variant,
				    u32 insn)
{
	unsigned int immr, imms, n, ones, ror, esz, tmp;
	u64 mask = ~0UL;

	/* Can't encode full zeroes or full ones */
	if (!imm || !~imm)
		return AARCH64_BREAK_FAULT;

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (upper_32_bits(imm))
			return AARCH64_BREAK_FAULT;
		esz = 32;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		esz = 64;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/*
	 * Inverse of Replicate(). Try to spot a repeating pattern
	 * with a pow2 stride.
	 */
	for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
		u64 emask = BIT(tmp) - 1;

		if ((imm & emask) != ((imm >> tmp) & emask))
			break;

		esz = tmp;
		mask = emask;
	}

	/* N is only set if we're encoding a 64bit value */
	n = esz == 64;

	/* Trim imm to the element size */
	imm &= mask;

	/* That's how many ones we need to encode */
	ones = hweight64(imm);

	/*
	 * imms is set to (ones - 1), prefixed with a string of ones
	 * and a zero if they fit. Cap it to 6 bits.
	 */
	imms  = ones - 1;
	imms |= 0xf << ffs(esz);
	imms &= BIT(6) - 1;

	/* Compute the rotation */
	if (range_of_ones(imm)) {
		/*
		 * Pattern: 0..01..10..0
		 *
		 * Compute how many rotate we need to align it right
		 */
		ror = __ffs64(imm);
	} else {
		/*
		 * Pattern: 0..01..10..01..1
		 *
		 * Fill the unused top bits with ones, and check if
		 * the result is a valid immediate (all ones with a
		 * contiguous ranges of zeroes).
		 */
		imm |= ~mask;
		if (!range_of_ones(~imm))
			return AARCH64_BREAK_FAULT;

		/*
		 * Compute the rotation to get a continuous set of
		 * ones, with the first bit set at position 0
		 */
		ror = fls(~imm);
	}

	/*
	 * immr is the number of bits we need to rotate back to the
	 * original set of ones. Note that this is relative to the
	 * element size...
	 */
	immr = (esz - ror) % esz;

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
1601 
/*
 * Generate a logical (immediate) instruction (AND/ORR/EOR/ANDS):
 * Rd = Rn <op> imm, where imm must be encodable as a bitmask
 * immediate.  Returns AARCH64_BREAK_FAULT otherwise.
 */
u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
				       enum aarch64_insn_variant variant,
				       enum aarch64_insn_register Rn,
				       enum aarch64_insn_register Rd,
				       u64 imm)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_imm_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_imm_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_imm_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_imm_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_encode_immediate(imm, variant, insn);
}
1632 
/*
 * Generate an EXTR instruction: extract a register-width field
 * starting at bit lsb of the Rn:Rm concatenation into Rd.  lsb is
 * encoded in the imms field and must be below the register width;
 * the 64-bit variant additionally sets sf and N.
 */
u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
			  enum aarch64_insn_register Rm,
			  enum aarch64_insn_register Rn,
			  enum aarch64_insn_register Rd,
			  u8 lsb)
{
	u32 insn;

	insn = aarch64_insn_get_extr_value();

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (lsb > 31)
			return AARCH64_BREAK_FAULT;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if (lsb > 63)
			return AARCH64_BREAK_FAULT;
		insn |= AARCH64_INSN_SF_BIT;
		insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
}

/* [<][>][^][v][top][bottom][index][help] */