root/arch/arm64/kernel/cpu_errata.c


DEFINITIONS

This source file includes the following definitions.
  1. is_affected_midr_range
  2. is_affected_midr_range_list
  3. is_kryo_midr
  4. has_mismatched_cache_type
  5. cpu_enable_trap_ctr_access
  6. __copy_hyp_vect_bpi
  7. install_bp_hardening_cb
  8. install_bp_hardening_cb
  9. call_smc_arch_workaround_1
  10. call_hvc_arch_workaround_1
  11. qcom_link_stack_sanitization
  12. parse_nospectre_v2
  13. detect_harden_bp_fw
  14. ssbd_cfg
  15. arm64_update_smccc_conduit
  16. arm64_enable_wa2_handling
  17. arm64_set_ssbd_mitigation
  18. has_ssbd_mitigation
  19. has_cortex_a76_erratum_1463225
  20. cpu_enable_cache_maint_trap
  21. get_spectre_v2_workaround_state
  22. check_branch_predictor
  23. needs_tx2_tvm_workaround
  24. has_neoverse_n1_erratum_1542419
  25. cpu_show_spectre_v1
  26. cpu_show_spectre_v2
  27. cpu_show_spec_store_bypass

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Contains CPU specific errata definitions
   4  *
   5  * Copyright (C) 2014 ARM Ltd.
   6  */
   7 
   8 #include <linux/arm-smccc.h>
   9 #include <linux/psci.h>
  10 #include <linux/types.h>
  11 #include <linux/cpu.h>
  12 #include <asm/cpu.h>
  13 #include <asm/cputype.h>
  14 #include <asm/cpufeature.h>
  15 #include <asm/smp_plat.h>
  16 
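      /*
       * Match helper for a single MIDR range: the CPU is affected when its
       * MIDR falls inside entry->midr_range, unless REVIDR flags the erratum
       * as already fixed for this particular revision.
       */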
  17 static bool __maybe_unused
  18 is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
  19 {
  20         const struct arm64_midr_revidr *fix;
  21         u32 midr = read_cpuid_id(), revidr;
  22 
  23         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
  24         if (!is_midr_in_range(midr, &entry->midr_range))
  25                 return false;
  26 
  27         midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
  28         revidr = read_cpuid(REVIDR_EL1);
  29         for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
  30                 if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
  31                         return false;
  32 
  33         return true;
  34 }
  35 
  36 static bool __maybe_unused
  37 is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
  38                             int scope)
  39 {
  40         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
  41         return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
  42 }
  43 
  44 static bool __maybe_unused
  45 is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
  46 {
  47         u32 model;
  48 
  49         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
  50 
  51         model = read_cpuid_id();
  52         model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
  53                  MIDR_ARCHITECTURE_MASK;
  54 
  55         return model == entry->midr_range.model;
  56 }
  57 
  58 static bool
  59 has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
  60                           int scope)
  61 {
  62         u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
  63         u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
  64         u64 ctr_raw, ctr_real;
  65 
  66         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
  67 
  68         /*
   69          * We want to make sure that all the CPUs in the system expose
   70          * a consistent CTR_EL0 so that applications behave correctly
   71          * when migrating between CPUs.
   72          *
   73          * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
   74          *
   75          * 1) It is safe if the system doesn't support IDC, as the CPU
   76          *    reports IDC = 0 anyway, consistent with the rest.
  77          *
  78          * 2) If the system has IDC, it is still safe as we trap CTR_EL0
  79          *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
  80          *
  81          * So, we need to make sure either the raw CTR_EL0 or the effective
  82          * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
  83          */
  84         ctr_raw = read_cpuid_cachetype() & mask;
  85         ctr_real = read_cpuid_effective_cachetype() & mask;
  86 
  87         return (ctr_real != sys) && (ctr_raw != sys);
  88 }
  89 
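      /*
       * cpu_enable hook: trap EL0 reads of CTR_EL0 (clear SCTLR_EL1.UCT) when
       * this CPU's cache type register differs from the system-wide value, or
       * when the ARM64_WORKAROUND_1542419 capability demands it.
       */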
  90 static void
  91 cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
  92 {
  93         u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
  94         bool enable_uct_trap = false;
  95 
  96         /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
  97         if ((read_cpuid_cachetype() & mask) !=
  98             (arm64_ftr_reg_ctrel0.sys_val & mask))
  99                 enable_uct_trap = true;
 100 
 101         /* ... or if the system is affected by an erratum */
 102         if (cap->capability == ARM64_WORKAROUND_1542419)
 103                 enable_uct_trap = true;
 104 
 105         if (enable_uct_trap)
 106                 sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
 107 }
 108 
 109 atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
 110 
 111 #include <asm/mmu_context.h>
 112 #include <asm/cacheflush.h>
 113 
 114 DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 115 
 116 #ifdef CONFIG_KVM_INDIRECT_VECTORS
 117 extern char __smccc_workaround_1_smc_start[];
 118 extern char __smccc_workaround_1_smc_end[];
 119 
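      /*
       * Copy the hardening sequence into each 0x80-byte vector entry of the
       * chosen 2K EL2 vector slot, then sync the instruction cache for it.
       */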
 120 static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
 121                                 const char *hyp_vecs_end)
 122 {
 123         void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
 124         int i;
 125 
 126         for (i = 0; i < SZ_2K; i += 0x80)
 127                 memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
 128 
 129         __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
 130 }
 131 
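      /*
       * Record the branch-predictor hardening callback for this CPU. When the
       * kernel owns the EL2 vectors, CPUs sharing a callback also share a
       * hardened vector slot; a new slot is populated the first time a given
       * callback is seen.
       */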
 132 static void install_bp_hardening_cb(bp_hardening_cb_t fn,
 133                                     const char *hyp_vecs_start,
 134                                     const char *hyp_vecs_end)
 135 {
 136         static DEFINE_RAW_SPINLOCK(bp_lock);
 137         int cpu, slot = -1;
 138 
 139         /*
 140          * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
 141          * we're a guest. Skip the hyp-vectors work.
 142          */
 143         if (!hyp_vecs_start) {
 144                 __this_cpu_write(bp_hardening_data.fn, fn);
 145                 return;
 146         }
 147 
 148         raw_spin_lock(&bp_lock);
 149         for_each_possible_cpu(cpu) {
 150                 if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
 151                         slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
 152                         break;
 153                 }
 154         }
 155 
 156         if (slot == -1) {
 157                 slot = atomic_inc_return(&arm64_el2_vector_last_slot);
 158                 BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
 159                 __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
 160         }
 161 
 162         __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
 163         __this_cpu_write(bp_hardening_data.fn, fn);
 164         raw_spin_unlock(&bp_lock);
 165 }
 166 #else
 167 #define __smccc_workaround_1_smc_start          NULL
 168 #define __smccc_workaround_1_smc_end            NULL
 169 
 170 static void install_bp_hardening_cb(bp_hardening_cb_t fn,
 171                                       const char *hyp_vecs_start,
 172                                       const char *hyp_vecs_end)
 173 {
 174         __this_cpu_write(bp_hardening_data.fn, fn);
 175 }
 176 #endif  /* CONFIG_KVM_INDIRECT_VECTORS */
 177 
 178 #include <uapi/linux/psci.h>
 179 #include <linux/arm-smccc.h>
 180 #include <linux/psci.h>
 181 
 182 static void call_smc_arch_workaround_1(void)
 183 {
 184         arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
 185 }
 186 
 187 static void call_hvc_arch_workaround_1(void)
 188 {
 189         arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
 190 }
 191 
 192 static void qcom_link_stack_sanitization(void)
 193 {
 194         u64 tmp;
 195 
 196         asm volatile("mov       %0, x30         \n"
 197                      ".rept     16              \n"
 198                      "bl        . + 4           \n"
 199                      ".endr                     \n"
 200                      "mov       x30, %0         \n"
 201                      : "=&r" (tmp));
 202 }
 203 
 204 static bool __nospectre_v2;
 205 static int __init parse_nospectre_v2(char *str)
 206 {
 207         __nospectre_v2 = true;
 208         return 0;
 209 }
 210 early_param("nospectre_v2", parse_nospectre_v2);
 211 
 212 /*
  213  * -1: No workaround available from firmware
 214  *  0: No workaround required
 215  *  1: Workaround installed
 216  */
 217 static int detect_harden_bp_fw(void)
 218 {
 219         bp_hardening_cb_t cb;
 220         void *smccc_start, *smccc_end;
 221         struct arm_smccc_res res;
 222         u32 midr = read_cpuid_id();
 223 
 224         if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
 225                 return -1;
 226 
 227         switch (psci_ops.conduit) {
 228         case PSCI_CONDUIT_HVC:
 229                 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 230                                   ARM_SMCCC_ARCH_WORKAROUND_1, &res);
 231                 switch ((int)res.a0) {
 232                 case 1:
 233                         /* Firmware says we're just fine */
 234                         return 0;
 235                 case 0:
 236                         cb = call_hvc_arch_workaround_1;
 237                         /* This is a guest, no need to patch KVM vectors */
 238                         smccc_start = NULL;
 239                         smccc_end = NULL;
 240                         break;
 241                 default:
 242                         return -1;
 243                 }
 244                 break;
 245 
 246         case PSCI_CONDUIT_SMC:
 247                 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 248                                   ARM_SMCCC_ARCH_WORKAROUND_1, &res);
 249                 switch ((int)res.a0) {
 250                 case 1:
 251                         /* Firmware says we're just fine */
 252                         return 0;
 253                 case 0:
 254                         cb = call_smc_arch_workaround_1;
 255                         smccc_start = __smccc_workaround_1_smc_start;
 256                         smccc_end = __smccc_workaround_1_smc_end;
 257                         break;
 258                 default:
 259                         return -1;
 260                 }
 261                 break;
 262 
 263         default:
 264                 return -1;
 265         }
 266 
 267         if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
 268             ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
 269                 cb = qcom_link_stack_sanitization;
 270 
 271         if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
 272                 install_bp_hardening_cb(cb, smccc_start, smccc_end);
 273 
 274         return 1;
 275 }
 276 
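      /*
       * Per-CPU flag: non-zero when the ARCH_WORKAROUND_2 firmware call must
       * be issued around kernel entry/exit on this CPU (ARM64_SSBD_KERNEL
       * mode, see has_ssbd_mitigation() below).
       */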
 277 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
 278 
 279 int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
 280 static bool __ssb_safe = true;
 281 
 282 static const struct ssbd_options {
 283         const char      *str;
 284         int             state;
 285 } ssbd_options[] = {
 286         { "force-on",   ARM64_SSBD_FORCE_ENABLE, },
 287         { "force-off",  ARM64_SSBD_FORCE_DISABLE, },
 288         { "kernel",     ARM64_SSBD_KERNEL, },
 289 };
 290 
 291 static int __init ssbd_cfg(char *buf)
 292 {
 293         int i;
 294 
 295         if (!buf || !buf[0])
 296                 return -EINVAL;
 297 
 298         for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
 299                 int len = strlen(ssbd_options[i].str);
 300 
 301                 if (strncmp(buf, ssbd_options[i].str, len))
 302                         continue;
 303 
 304                 ssbd_state = ssbd_options[i].state;
 305                 return 0;
 306         }
 307 
 308         return -EINVAL;
 309 }
 310 early_param("ssbd", ssbd_cfg);
 311 
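      /*
       * Alternative callback: patch the single placeholder instruction with
       * an HVC or SMC, matching the SMCCC conduit reported by PSCI.
       */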
 312 void __init arm64_update_smccc_conduit(struct alt_instr *alt,
 313                                        __le32 *origptr, __le32 *updptr,
 314                                        int nr_inst)
 315 {
 316         u32 insn;
 317 
 318         BUG_ON(nr_inst != 1);
 319 
 320         switch (psci_ops.conduit) {
 321         case PSCI_CONDUIT_HVC:
 322                 insn = aarch64_insn_get_hvc_value();
 323                 break;
 324         case PSCI_CONDUIT_SMC:
 325                 insn = aarch64_insn_get_smc_value();
 326                 break;
 327         default:
 328                 return;
 329         }
 330 
 331         *updptr = cpu_to_le32(insn);
 332 }
 333 
 334 void __init arm64_enable_wa2_handling(struct alt_instr *alt,
 335                                       __le32 *origptr, __le32 *updptr,
 336                                       int nr_inst)
 337 {
 338         BUG_ON(nr_inst != 1);
 339         /*
 340          * Only allow mitigation on EL1 entry/exit and guest
 341          * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
 342          * be flipped.
 343          */
 344         if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
 345                 *updptr = cpu_to_le32(aarch64_insn_gen_nop());
 346 }
 347 
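      /*
       * Toggle the Speculative Store Bypass mitigation on the local CPU,
       * using PSTATE.SSBS when the CPU has it and the ARCH_WORKAROUND_2
       * firmware call otherwise.
       */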
 348 void arm64_set_ssbd_mitigation(bool state)
 349 {
 350         if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
 351                 pr_info_once("SSBD disabled by kernel configuration\n");
 352                 return;
 353         }
 354 
 355         if (this_cpu_has_cap(ARM64_SSBS)) {
 356                 if (state)
 357                         asm volatile(SET_PSTATE_SSBS(0));
 358                 else
 359                         asm volatile(SET_PSTATE_SSBS(1));
 360                 return;
 361         }
 362 
 363         switch (psci_ops.conduit) {
 364         case PSCI_CONDUIT_HVC:
 365                 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
 366                 break;
 367 
 368         case PSCI_CONDUIT_SMC:
 369                 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
 370                 break;
 371 
 372         default:
 373                 WARN_ON_ONCE(1);
 374                 break;
 375         }
 376 }
 377 
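      /*
       * Per-CPU capability check: probe firmware for ARCH_WORKAROUND_2,
       * update the global ssbd_state/__ssb_safe tracking, and return whether
       * the dynamic mitigation is required on this CPU.
       */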
 378 static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 379                                     int scope)
 380 {
 381         struct arm_smccc_res res;
 382         bool required = true;
 383         s32 val;
 384         bool this_cpu_safe = false;
 385 
 386         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 387 
 388         if (cpu_mitigations_off())
 389                 ssbd_state = ARM64_SSBD_FORCE_DISABLE;
 390 
 391         /* delay setting __ssb_safe until we get a firmware response */
 392         if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
 393                 this_cpu_safe = true;
 394 
 395         if (this_cpu_has_cap(ARM64_SSBS)) {
 396                 if (!this_cpu_safe)
 397                         __ssb_safe = false;
 398                 required = false;
 399                 goto out_printmsg;
 400         }
 401 
 402         if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
 403                 ssbd_state = ARM64_SSBD_UNKNOWN;
 404                 if (!this_cpu_safe)
 405                         __ssb_safe = false;
 406                 return false;
 407         }
 408 
 409         switch (psci_ops.conduit) {
 410         case PSCI_CONDUIT_HVC:
 411                 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 412                                   ARM_SMCCC_ARCH_WORKAROUND_2, &res);
 413                 break;
 414 
 415         case PSCI_CONDUIT_SMC:
 416                 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 417                                   ARM_SMCCC_ARCH_WORKAROUND_2, &res);
 418                 break;
 419 
 420         default:
 421                 ssbd_state = ARM64_SSBD_UNKNOWN;
 422                 if (!this_cpu_safe)
 423                         __ssb_safe = false;
 424                 return false;
 425         }
 426 
 427         val = (s32)res.a0;
 428 
 429         switch (val) {
 430         case SMCCC_RET_NOT_SUPPORTED:
 431                 ssbd_state = ARM64_SSBD_UNKNOWN;
 432                 if (!this_cpu_safe)
 433                         __ssb_safe = false;
 434                 return false;
 435 
 436         /* machines with mixed mitigation requirements must not return this */
 437         case SMCCC_RET_NOT_REQUIRED:
 438                 pr_info_once("%s mitigation not required\n", entry->desc);
 439                 ssbd_state = ARM64_SSBD_MITIGATED;
 440                 return false;
 441 
 442         case SMCCC_RET_SUCCESS:
 443                 __ssb_safe = false;
 444                 required = true;
 445                 break;
 446 
 447         case 1: /* Mitigation not required on this CPU */
 448                 required = false;
 449                 break;
 450 
 451         default:
 452                 WARN_ON(1);
 453                 if (!this_cpu_safe)
 454                         __ssb_safe = false;
 455                 return false;
 456         }
 457 
 458         switch (ssbd_state) {
 459         case ARM64_SSBD_FORCE_DISABLE:
 460                 arm64_set_ssbd_mitigation(false);
 461                 required = false;
 462                 break;
 463 
 464         case ARM64_SSBD_KERNEL:
 465                 if (required) {
 466                         __this_cpu_write(arm64_ssbd_callback_required, 1);
 467                         arm64_set_ssbd_mitigation(true);
 468                 }
 469                 break;
 470 
 471         case ARM64_SSBD_FORCE_ENABLE:
 472                 arm64_set_ssbd_mitigation(true);
 473                 required = true;
 474                 break;
 475 
 476         default:
 477                 WARN_ON(1);
 478                 break;
 479         }
 480 
 481 out_printmsg:
 482         switch (ssbd_state) {
 483         case ARM64_SSBD_FORCE_DISABLE:
 484                 pr_info_once("%s disabled from command-line\n", entry->desc);
 485                 break;
 486 
 487         case ARM64_SSBD_FORCE_ENABLE:
 488                 pr_info_once("%s forced from command-line\n", entry->desc);
 489                 break;
 490         }
 491 
 492         return required;
 493 }
 494 
  495 /* Known cores that are not vulnerable to Speculative Store Bypass */
 496 static const struct midr_range arm64_ssb_cpus[] = {
 497         MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
 498         MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
 499         MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
 500         MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
 501         {},
 502 };
 503 
 504 #ifdef CONFIG_ARM64_ERRATUM_1463225
 505 DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
 506 
 507 static bool
 508 has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
 509                                int scope)
 510 {
 511         u32 midr = read_cpuid_id();
 512         /* Cortex-A76 r0p0 - r3p1 */
 513         struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
 514 
 515         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 516         return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
 517 }
 518 #endif
 519 
 520 static void __maybe_unused
 521 cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
 522 {
 523         sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
 524 }
 525 
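      /*
       * Helper macros for building arm64_cpu_capabilities entries that match
       * on MIDR ranges, range lists or individual revisions.
       */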
 526 #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)       \
 527         .matches = is_affected_midr_range,                      \
 528         .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
 529 
 530 #define CAP_MIDR_ALL_VERSIONS(model)                                    \
 531         .matches = is_affected_midr_range,                              \
 532         .midr_range = MIDR_ALL_VERSIONS(model)
 533 
 534 #define MIDR_FIXED(rev, revidr_mask) \
 535         .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}
 536 
 537 #define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)            \
 538         .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                         \
 539         CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)
 540 
 541 #define CAP_MIDR_RANGE_LIST(list)                               \
 542         .matches = is_affected_midr_range_list,                 \
 543         .midr_range_list = list
 544 
  545 /* Errata affecting a range of revisions of a given model variant */
 546 #define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)      \
 547         ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)
 548 
 549 /* Errata affecting a single variant/revision of a model */
 550 #define ERRATA_MIDR_REV(model, var, rev)        \
 551         ERRATA_MIDR_RANGE(model, var, rev, var, rev)
 552 
  553 /* Errata affecting all variants/revisions of a given model */
 554 #define ERRATA_MIDR_ALL_VERSIONS(model)                         \
 555         .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
 556         CAP_MIDR_ALL_VERSIONS(model)
 557 
  558 /* Errata affecting a list of MIDR ranges that share the same workaround */
 559 #define ERRATA_MIDR_RANGE_LIST(midr_list)                       \
 560         .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
 561         CAP_MIDR_RANGE_LIST(midr_list)
 562 
 563 /* Track overall mitigation state. We are only mitigated if all cores are ok */
 564 static bool __hardenbp_enab = true;
 565 static bool __spectrev2_safe = true;
 566 
 567 int get_spectre_v2_workaround_state(void)
 568 {
 569         if (__spectrev2_safe)
 570                 return ARM64_BP_HARDEN_NOT_REQUIRED;
 571 
 572         if (!__hardenbp_enab)
 573                 return ARM64_BP_HARDEN_UNKNOWN;
 574 
 575         return ARM64_BP_HARDEN_WA_NEEDED;
 576 }
 577 
 578 /*
 579  * List of CPUs that do not need any Spectre-v2 mitigation at all.
 580  */
 581 static const struct midr_range spectre_v2_safe_list[] = {
 582         MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
 583         MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
 584         MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
 585         MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
 586         MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
 587         { /* sentinel */ }
 588 };
 589 
 590 /*
 591  * Track overall bp hardening for all heterogeneous cores in the machine.
 592  * We are only considered "safe" if all booted cores are known safe.
 593  */
 594 static bool __maybe_unused
 595 check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
 596 {
 597         int need_wa;
 598 
 599         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 600 
 601         /* If the CPU has CSV2 set, we're safe */
 602         if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
 603                                                  ID_AA64PFR0_CSV2_SHIFT))
 604                 return false;
 605 
 606         /* Alternatively, we have a list of unaffected CPUs */
 607         if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
 608                 return false;
 609 
  610         /* Fall back to firmware detection */
 611         need_wa = detect_harden_bp_fw();
 612         if (!need_wa)
 613                 return false;
 614 
 615         __spectrev2_safe = false;
 616 
 617         if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
 618                 pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
 619                 __hardenbp_enab = false;
 620                 return false;
 621         }
 622 
 623         /* forced off */
 624         if (__nospectre_v2 || cpu_mitigations_off()) {
 625                 pr_info_once("spectrev2 mitigation disabled by command line option\n");
 626                 __hardenbp_enab = false;
 627                 return false;
 628         }
 629 
 630         if (need_wa < 0) {
 631                 pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
 632                 __hardenbp_enab = false;
 633         }
 634 
 635         return (need_wa > 0);
 636 }
 637 
 638 static const __maybe_unused struct midr_range tx2_family_cpus[] = {
 639         MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
 640         MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
 641         {},
 642 };
 643 
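      /*
       * The TVM trap part of Cavium TX2 erratum 219 is only needed when the
       * kernel can run at EL2 and SMT is enabled, i.e. some CPU reports a
       * non-zero thread ID in MPIDR_EL1 Aff0.
       */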
 644 static bool __maybe_unused
 645 needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
 646                          int scope)
 647 {
 648         int i;
 649 
 650         if (!is_affected_midr_range_list(entry, scope) ||
 651             !is_hyp_mode_available())
 652                 return false;
 653 
 654         for_each_possible_cpu(i) {
 655                 if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
 656                         return true;
 657         }
 658 
 659         return false;
 660 }
 661 
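      /*
       * Erratum 1542419 only needs the kernel-side CTR_EL0 trap on
       * Neoverse-N1 parts that advertise CTR_EL0.DIC.
       */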
 662 static bool __maybe_unused
 663 has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
 664                                 int scope)
 665 {
 666         u32 midr = read_cpuid_id();
 667         bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
 668         const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
 669 
 670         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 671         return is_midr_in_range(midr, &range) && has_dic;
 672 }
 673 
 674 #ifdef CONFIG_HARDEN_EL2_VECTORS
 675 
 676 static const struct midr_range arm64_harden_el2_vectors[] = {
 677         MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
 678         MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
 679         {},
 680 };
 681 
 682 #endif
 683 
 684 #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
 685 static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
 686 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
 687         {
 688                 ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
 689         },
 690         {
 691                 .midr_range.model = MIDR_QCOM_KRYO,
 692                 .matches = is_kryo_midr,
 693         },
 694 #endif
 695 #ifdef CONFIG_ARM64_ERRATUM_1286807
 696         {
 697                 ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
 698         },
 699 #endif
 700         {},
 701 };
 702 #endif
 703 
 704 #ifdef CONFIG_CAVIUM_ERRATUM_27456
 705 const struct midr_range cavium_erratum_27456_cpus[] = {
 706         /* Cavium ThunderX, T88 pass 1.x - 2.1 */
 707         MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
 708         /* Cavium ThunderX, T81 pass 1.0 */
 709         MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
 710         {},
 711 };
 712 #endif
 713 
 714 #ifdef CONFIG_CAVIUM_ERRATUM_30115
 715 static const struct midr_range cavium_erratum_30115_cpus[] = {
 716         /* Cavium ThunderX, T88 pass 1.x - 2.2 */
 717         MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
 718         /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
 719         MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
 720         /* Cavium ThunderX, T83 pass 1.0 */
 721         MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
 722         {},
 723 };
 724 #endif
 725 
 726 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
 727 static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
 728         {
 729                 ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
 730         },
 731         {
 732                 .midr_range.model = MIDR_QCOM_KRYO,
 733                 .matches = is_kryo_midr,
 734         },
 735         {},
 736 };
 737 #endif
 738 
 739 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
 740 static const struct midr_range workaround_clean_cache[] = {
 741 #if     defined(CONFIG_ARM64_ERRATUM_826319) || \
 742         defined(CONFIG_ARM64_ERRATUM_827319) || \
 743         defined(CONFIG_ARM64_ERRATUM_824069)
 744         /* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
 745         MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
 746 #endif
 747 #ifdef  CONFIG_ARM64_ERRATUM_819472
 748         /* Cortex-A53 r0p[01] : ARM errata 819472 */
 749         MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
 750 #endif
 751         {},
 752 };
 753 #endif
 754 
 755 #ifdef CONFIG_ARM64_ERRATUM_1418040
 756 /*
 757  * - 1188873 affects r0p0 to r2p0
 758  * - 1418040 affects r0p0 to r3p1
 759  */
 760 static const struct midr_range erratum_1418040_list[] = {
 761         /* Cortex-A76 r0p0 to r3p1 */
 762         MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
 763         /* Neoverse-N1 r0p0 to r3p1 */
 764         MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
 765         {},
 766 };
 767 #endif
 768 
 769 #ifdef CONFIG_ARM64_ERRATUM_845719
 770 static const struct midr_range erratum_845719_list[] = {
 771         /* Cortex-A53 r0p[01234] */
 772         MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
 773         /* Brahma-B53 r0p[0] */
 774         MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
 775         {},
 776 };
 777 #endif
 778 
 779 #ifdef CONFIG_ARM64_ERRATUM_843419
 780 static const struct arm64_cpu_capabilities erratum_843419_list[] = {
 781         {
 782                 /* Cortex-A53 r0p[01234] */
 783                 .matches = is_affected_midr_range,
 784                 ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
 785                 MIDR_FIXED(0x4, BIT(8)),
 786         },
 787         {
 788                 /* Brahma-B53 r0p[0] */
 789                 .matches = is_affected_midr_range,
 790                 ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
 791         },
 792         {},
 793 };
 794 #endif
 795 
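      /*
       * Master list of erratum capabilities. Each entry is matched against
       * every booting CPU and, where it applies, its cpu_enable hook is run
       * on that CPU.
       */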
 796 const struct arm64_cpu_capabilities arm64_errata[] = {
 797 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
 798         {
 799                 .desc = "ARM errata 826319, 827319, 824069, 819472",
 800                 .capability = ARM64_WORKAROUND_CLEAN_CACHE,
 801                 ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
 802                 .cpu_enable = cpu_enable_cache_maint_trap,
 803         },
 804 #endif
 805 #ifdef CONFIG_ARM64_ERRATUM_832075
 806         {
 807         /* Cortex-A57 r0p0 - r1p2 */
 808                 .desc = "ARM erratum 832075",
 809                 .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
 810                 ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
 811                                   0, 0,
 812                                   1, 2),
 813         },
 814 #endif
 815 #ifdef CONFIG_ARM64_ERRATUM_834220
 816         {
 817         /* Cortex-A57 r0p0 - r1p2 */
 818                 .desc = "ARM erratum 834220",
 819                 .capability = ARM64_WORKAROUND_834220,
 820                 ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
 821                                   0, 0,
 822                                   1, 2),
 823         },
 824 #endif
 825 #ifdef CONFIG_ARM64_ERRATUM_843419
 826         {
 827                 .desc = "ARM erratum 843419",
 828                 .capability = ARM64_WORKAROUND_843419,
 829                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 830                 .matches = cpucap_multi_entry_cap_matches,
 831                 .match_list = erratum_843419_list,
 832         },
 833 #endif
 834 #ifdef CONFIG_ARM64_ERRATUM_845719
 835         {
 836                 .desc = "ARM erratum 845719",
 837                 .capability = ARM64_WORKAROUND_845719,
 838                 ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
 839         },
 840 #endif
 841 #ifdef CONFIG_CAVIUM_ERRATUM_23154
 842         {
 843         /* Cavium ThunderX, pass 1.x */
 844                 .desc = "Cavium erratum 23154",
 845                 .capability = ARM64_WORKAROUND_CAVIUM_23154,
 846                 ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
 847         },
 848 #endif
 849 #ifdef CONFIG_CAVIUM_ERRATUM_27456
 850         {
 851                 .desc = "Cavium erratum 27456",
 852                 .capability = ARM64_WORKAROUND_CAVIUM_27456,
 853                 ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
 854         },
 855 #endif
 856 #ifdef CONFIG_CAVIUM_ERRATUM_30115
 857         {
 858                 .desc = "Cavium erratum 30115",
 859                 .capability = ARM64_WORKAROUND_CAVIUM_30115,
 860                 ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
 861         },
 862 #endif
 863         {
 864                 .desc = "Mismatched cache type (CTR_EL0)",
 865                 .capability = ARM64_MISMATCHED_CACHE_TYPE,
 866                 .matches = has_mismatched_cache_type,
 867                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 868                 .cpu_enable = cpu_enable_trap_ctr_access,
 869         },
 870 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
 871         {
 872                 .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
 873                 .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
 874                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 875                 .matches = cpucap_multi_entry_cap_matches,
 876                 .match_list = qcom_erratum_1003_list,
 877         },
 878 #endif
 879 #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
 880         {
 881                 .desc = "Qualcomm erratum 1009, ARM erratum 1286807",
 882                 .capability = ARM64_WORKAROUND_REPEAT_TLBI,
 883                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 884                 .matches = cpucap_multi_entry_cap_matches,
 885                 .match_list = arm64_repeat_tlbi_list,
 886         },
 887 #endif
 888 #ifdef CONFIG_ARM64_ERRATUM_858921
 889         {
 890         /* Cortex-A73 all versions */
 891                 .desc = "ARM erratum 858921",
 892                 .capability = ARM64_WORKAROUND_858921,
 893                 ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
 894         },
 895 #endif
 896         {
 897                 .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 898                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 899                 .matches = check_branch_predictor,
 900         },
 901 #ifdef CONFIG_HARDEN_EL2_VECTORS
 902         {
 903                 .desc = "EL2 vector hardening",
 904                 .capability = ARM64_HARDEN_EL2_VECTORS,
 905                 ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
 906         },
 907 #endif
 908         {
 909                 .desc = "Speculative Store Bypass Disable",
 910                 .capability = ARM64_SSBD,
 911                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 912                 .matches = has_ssbd_mitigation,
 913                 .midr_range_list = arm64_ssb_cpus,
 914         },
 915 #ifdef CONFIG_ARM64_ERRATUM_1418040
 916         {
 917                 .desc = "ARM erratum 1418040",
 918                 .capability = ARM64_WORKAROUND_1418040,
 919                 ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
 920         },
 921 #endif
 922 #ifdef CONFIG_ARM64_ERRATUM_1165522
 923         {
 924                 /* Cortex-A76 r0p0 to r2p0 */
 925                 .desc = "ARM erratum 1165522",
 926                 .capability = ARM64_WORKAROUND_1165522,
 927                 ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
 928         },
 929 #endif
 930 #ifdef CONFIG_ARM64_ERRATUM_1463225
 931         {
 932                 .desc = "ARM erratum 1463225",
 933                 .capability = ARM64_WORKAROUND_1463225,
 934                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 935                 .matches = has_cortex_a76_erratum_1463225,
 936         },
 937 #endif
 938 #ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
 939         {
 940                 .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
 941                 .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
 942                 ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
 943                 .matches = needs_tx2_tvm_workaround,
 944         },
 945         {
 946                 .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
 947                 .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
 948                 ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
 949         },
 950 #endif
 951 #ifdef CONFIG_ARM64_ERRATUM_1542419
 952         {
 953                 /* we depend on the firmware portion for correctness */
 954                 .desc = "ARM erratum 1542419 (kernel portion)",
 955                 .capability = ARM64_WORKAROUND_1542419,
 956                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 957                 .matches = has_neoverse_n1_erratum_1542419,
 958                 .cpu_enable = cpu_enable_trap_ctr_access,
 959         },
 960 #endif
 961         {
 962         }
 963 };
 964 
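      /*
       * sysfs show handlers backing the files under
       * /sys/devices/system/cpu/vulnerabilities/.
       */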
 965 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
 966                             char *buf)
 967 {
 968         return sprintf(buf, "Mitigation: __user pointer sanitization\n");
 969 }
 970 
 971 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
 972                 char *buf)
 973 {
 974         switch (get_spectre_v2_workaround_state()) {
 975         case ARM64_BP_HARDEN_NOT_REQUIRED:
 976                 return sprintf(buf, "Not affected\n");
 977         case ARM64_BP_HARDEN_WA_NEEDED:
 978                 return sprintf(buf, "Mitigation: Branch predictor hardening\n");
 979         case ARM64_BP_HARDEN_UNKNOWN:
 980         default:
 981                 return sprintf(buf, "Vulnerable\n");
 982         }
 983 }
 984 
 985 ssize_t cpu_show_spec_store_bypass(struct device *dev,
 986                 struct device_attribute *attr, char *buf)
 987 {
 988         if (__ssb_safe)
 989                 return sprintf(buf, "Not affected\n");
 990 
 991         switch (ssbd_state) {
 992         case ARM64_SSBD_KERNEL:
 993         case ARM64_SSBD_FORCE_ENABLE:
 994                 if (IS_ENABLED(CONFIG_ARM64_SSBD))
 995                         return sprintf(buf,
 996                             "Mitigation: Speculative Store Bypass disabled via prctl\n");
 997         }
 998 
 999         return sprintf(buf, "Vulnerable\n");
1000 }
