This source file includes following definitions.
- cpucap_default_scope
- cpucap_late_cpu_optional
- cpucap_late_cpu_permitted
- cpucap_multi_entry_cap_matches
- __cpus_have_const_cap
- cpus_have_cap
- cpus_have_const_cap
- cpus_set_cap
- cpuid_feature_extract_signed_field_width
- cpuid_feature_extract_signed_field
- cpuid_feature_extract_unsigned_field_width
- cpuid_feature_extract_unsigned_field
- arm64_ftr_mask
- arm64_ftr_reg_user_value
- cpuid_feature_extract_field_width
- cpuid_feature_extract_field
- arm64_ftr_value
- id_aa64mmfr0_mixed_endian_el0
- id_aa64pfr0_32bit_el0
- id_aa64pfr0_sve
- cpu_supports_mixed_endian_el0
- system_supports_32bit_el0
- system_supports_4kb_granule
- system_supports_64kb_granule
- system_supports_16kb_granule
- system_supports_mixed_endian_el0
- system_supports_mixed_endian
- system_supports_fpsimd
- system_uses_ttbr0_pan
- system_supports_sve
- system_supports_cnp
- system_supports_address_auth
- system_supports_generic_auth
- system_uses_irq_prio_masking
- system_has_prio_mask_debugging
- arm64_get_ssbd_state
- id_aa64mmfr0_parange_to_phys_shift
1
2
3
4
5
6 #ifndef __ASM_CPUFEATURE_H
7 #define __ASM_CPUFEATURE_H
8
9 #include <asm/cpucaps.h>
10 #include <asm/cputype.h>
11 #include <asm/hwcap.h>
12 #include <asm/sysreg.h>
13
/* Number of ELF hwcap feature bits tracked (cpu_set_feature()/cpu_have_feature()). */
#define MAX_CPU_FEATURES 64
/* Map a feature name to its KERNEL_HWCAP_<x> index (see <asm/hwcap.h>). */
#define cpu_feature(x) KERNEL_HWCAP_ ## x
16
17 #ifndef __ASSEMBLY__
18
19 #include <linux/bug.h>
20 #include <linux/jump_label.h>
21 #include <linux/kernel.h>
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
/*
 * Policy for combining a CPU feature register field across CPUs when
 * computing the system-wide "safe" value (see struct arm64_ftr_bits).
 */
enum ftr_type {
	FTR_EXACT,			/* Use a predefined safe value */
	FTR_LOWER_SAFE,			/* Smaller value is safe */
	FTR_HIGHER_SAFE,		/* Bigger value is safe */
	FTR_HIGHER_OR_ZERO_SAFE,	/* Bigger value is safe, but 0 is biggest */
};
43
/* Values for struct arm64_ftr_bits::strict — mismatch is (not) fatal. */
#define FTR_STRICT true
#define FTR_NONSTRICT false

/* Values for struct arm64_ftr_bits::sign — field is (un)signed. */
#define FTR_SIGNED true
#define FTR_UNSIGNED false

/* Values for struct arm64_ftr_bits::visible — exposed to userspace or not. */
#define FTR_VISIBLE true
#define FTR_HIDDEN false

/* Visible only when the given config option is built in. */
#define FTR_VISIBLE_IF_IS_ENABLED(config) \
	(IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)
55
/* Description of one bit-field within a CPU feature register. */
struct arm64_ftr_bits {
	bool		sign;		/* Value is signed ? (FTR_SIGNED/FTR_UNSIGNED) */
	bool		visible;	/* Exposed to userspace ? (FTR_VISIBLE/FTR_HIDDEN) */
	bool		strict;		/* CPU mismatch handling (FTR_STRICT/FTR_NONSTRICT) */
	enum ftr_type	type;		/* How to pick the system-safe value */
	u8		shift;		/* Field's least significant bit position */
	u8		width;		/* Field width in bits */
	s64		safe_val;	/* Safe value for FTR_EXACT features */
};
65
66
67
68
69
70
/*
 * System-wide sanitised view of one CPU feature register, built from
 * the per-field descriptions in @ftr_bits.
 */
struct arm64_ftr_reg {
	const char			*name;
	u64				strict_mask;	/* Bits that must match on all CPUs */
	u64				user_mask;	/* Bits visible to userspace (see arm64_ftr_reg_user_value()) */
	u64				sys_val;	/* Sanitised system-wide value */
	u64				user_val;	/* Value reported to userspace for hidden fields */
	const struct arm64_ftr_bits	*ftr_bits;	/* NULL-ish terminated field description table */
};

extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
/*
 * CPU capability scope bits, encoded in arm64_cpu_capabilities::type.
 *
 * SCOPE_LOCAL_CPU : capability is detected on individual CPUs.
 * SCOPE_SYSTEM    : capability is detected from the system-wide,
 *                   sanitised view of the feature registers.
 */
#define ARM64_CPUCAP_SCOPE_LOCAL_CPU ((u16)BIT(0))
#define ARM64_CPUCAP_SCOPE_SYSTEM ((u16)BIT(1))

/*
 * SCOPE_BOOT_CPU: capability is decided by the boot CPU alone
 * (presumably so it can be used for boot-time code patching before
 * secondaries come up — TODO confirm against cpufeature.c).
 */
#define ARM64_CPUCAP_SCOPE_BOOT_CPU ((u16)BIT(2))
#define ARM64_CPUCAP_SCOPE_MASK \
	(ARM64_CPUCAP_SCOPE_SYSTEM | \
	 ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
	 ARM64_CPUCAP_SCOPE_BOOT_CPU)

/* Short aliases, used by capability tables and matches() callbacks. */
#define SCOPE_SYSTEM ARM64_CPUCAP_SCOPE_SYSTEM
#define SCOPE_LOCAL_CPU ARM64_CPUCAP_SCOPE_LOCAL_CPU
#define SCOPE_BOOT_CPU ARM64_CPUCAP_SCOPE_BOOT_CPU
#define SCOPE_ALL ARM64_CPUCAP_SCOPE_MASK

/*
 * Late-CPU policy bits (a "late" CPU is one brought up after the
 * capability state has been finalised):
 *
 * PERMITTED_FOR_LATE_CPU: a late CPU may have this capability even if
 *                         the finalised system state does not.
 * OPTIONAL_FOR_LATE_CPU : a late CPU may lack this capability even if
 *                         the finalised system state has it.
 */
#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU ((u16)BIT(4))

#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU ((u16)BIT(5))

/*
 * Convenience type combinations used by the capability tables below.
 *
 * LOCAL_CPU_ERRATUM: per-CPU erratum workaround. A late CPU without
 * the erratum is acceptable (OPTIONAL), but the PERMITTED bit is not
 * set, so a late CPU newly exhibiting it is not.
 */
#define ARM64_CPUCAP_LOCAL_CPU_ERRATUM \
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)

/*
 * SYSTEM_FEATURE: detected from the sanitised system-wide registers;
 * a late CPU that also has it is harmless (PERMITTED), but one that
 * lacks it is not (no OPTIONAL bit).
 */
#define ARM64_CPUCAP_SYSTEM_FEATURE \
	(ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)

/*
 * WEAK_LOCAL_CPU_FEATURE: per-CPU feature with fully relaxed late-CPU
 * rules — a late CPU may either have it or lack it.
 */
#define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE \
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU | \
	 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)

/*
 * BOOT_RESTRICTED_CPU_LOCAL_FEATURE: per-CPU feature a late CPU may
 * lack (OPTIONAL) but must not newly introduce (no PERMITTED bit).
 * NOTE(review): bit-wise identical to LOCAL_CPU_ERRATUM; kept as a
 * distinct name for table readability.
 */
#define ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE \
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)

/*
 * STRICT_BOOT_CPU_FEATURE: decided by the boot CPU; neither late-CPU
 * relaxation applies, so every later CPU must match.
 */
#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE ARM64_CPUCAP_SCOPE_BOOT_CPU
285
/* Description of one CPU capability (feature or erratum workaround). */
struct arm64_cpu_capabilities {
	const char *desc;		/* Human-readable name, may be NULL */
	u16 capability;			/* ARM64_* index from <asm/cpucaps.h> */
	u16 type;			/* ARM64_CPUCAP_* scope/policy bits */
	/* Detection callback; @scope is one of the SCOPE_* values. */
	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
	/*
	 * Enable callback, run on each CPU for a detected capability
	 * (presumably with preemption disabled — TODO confirm against
	 * cpufeature.c before relying on context).
	 */
	void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
	union {
		struct {	/* MIDR-based matching (errata) */
			struct midr_range midr_range;
			/* Optional MIDR/REVIDR pairs of fixed revisions */
			const struct arm64_midr_revidr {
				u32 midr_rv;		/* revision/variant value */
				u32 revidr_mask;	/* REVIDR bits that indicate the fix */
			} * const fixed_revs;
		};

		const struct midr_range *midr_range_list;	/* List-based MIDR matching */
		struct {	/* Feature-register field matching */
			u32 sys_reg;		/* Register to check */
			u8 field_pos;		/* Field shift within the register */
			u8 min_field_value;	/* Minimum acceptable field value */
			u8 hwcap_type;		/* ELF hwcap set this maps to */
			bool sign;		/* Field is signed ? */
			unsigned long hwcap;	/* ELF hwcap bit to expose */
		};
	};

	/*
	 * Optional list of sub-capabilities; used via
	 * cpucap_multi_entry_cap_matches(), terminated by an entry
	 * with a NULL matches() callback.
	 */
	const struct arm64_cpu_capabilities *match_list;
};
337
338 static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
339 {
340 return cap->type & ARM64_CPUCAP_SCOPE_MASK;
341 }
342
343 static inline bool
344 cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
345 {
346 return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
347 }
348
349 static inline bool
350 cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
351 {
352 return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
353 }
354
355
356
357
358
359
360 static inline bool
361 cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
362 int scope)
363 {
364 const struct arm64_cpu_capabilities *caps;
365
366 for (caps = entry->match_list; caps->matches; caps++)
367 if (caps->matches(caps, scope))
368 return true;
369
370 return false;
371 }
372
/* Bitmap of detected capabilities; set via cpus_set_cap(). */
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
/* Per-capability static branch keys, used by __cpus_have_const_cap(). */
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
/* Set once the capability keys above are finalised and usable. */
extern struct static_key_false arm64_const_caps_ready;

/* One extra slot beyond ARM64_NCAPS (purpose not visible here — see cpufeature.c). */
#define ARM64_NPATCHABLE (ARM64_NCAPS + 1)
/* Capabilities detected on the boot CPU (SCOPE_BOOT_CPU). */
extern DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);

/* Iterate over every set bit in cpu_hwcaps. */
#define for_each_available_cap(cap) \
	for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS)
383
/* Check a capability on the calling CPU (not the system-wide state). */
bool this_cpu_has_cap(unsigned int cap);
/* Numeric ELF hwcap helpers; @num is a KERNEL_HWCAP_* index. */
void cpu_set_feature(unsigned int num);
bool cpu_have_feature(unsigned int num);
unsigned long cpu_get_elf_hwcap(void);
unsigned long cpu_get_elf_hwcap2(void);

/* Name-based wrappers around the numeric hwcap helpers above. */
#define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name))
#define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name))
392
393
/*
 * Test a capability via its static branch key (patched-in, no memory
 * load on the fast path). Only meaningful once arm64_const_caps_ready
 * is set; callers should use cpus_have_const_cap(), which checks that.
 */
static __always_inline bool __cpus_have_const_cap(int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	return static_branch_unlikely(&cpu_hwcap_keys[num]);
}
400
401 static inline bool cpus_have_cap(unsigned int num)
402 {
403 if (num >= ARM64_NCAPS)
404 return false;
405 return test_bit(num, cpu_hwcaps);
406 }
407
/*
 * Preferred capability test: uses the fast static-branch form once
 * arm64_const_caps_ready is set, and falls back to the bitmap before
 * that point (early boot).
 */
static __always_inline bool cpus_have_const_cap(int num)
{
	if (static_branch_likely(&arm64_const_caps_ready))
		return __cpus_have_const_cap(num);
	else
		return cpus_have_cap(num);
}
415
416 static inline void cpus_set_cap(unsigned int num)
417 {
418 if (num >= ARM64_NCAPS) {
419 pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n",
420 num, ARM64_NCAPS);
421 } else {
422 __set_bit(num, cpu_hwcaps);
423 }
424 }
425
/*
 * Extract a signed field of @width bits at bit position @field from a
 * 64-bit register value, sign-extending via the shift-up/arithmetic-
 * shift-down trick. Assumes 1 <= width and field + width <= 64
 * (width == 0 would shift by 64, which is undefined).
 */
static inline int __attribute_const__
cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
{
	return (s64)(features << (64 - width - field)) >> (64 - width);
}
431
432 static inline int __attribute_const__
433 cpuid_feature_extract_signed_field(u64 features, int field)
434 {
435 return cpuid_feature_extract_signed_field_width(features, field, 4);
436 }
437
/*
 * Extract an unsigned field of @width bits at bit position @field from
 * a 64-bit register value. Assumes 1 <= width and field + width <= 64
 * (width == 0 would shift by 64, which is undefined).
 */
static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
{
	return (u64)(features << (64 - width - field)) >> (64 - width);
}
443
444 static inline unsigned int __attribute_const__
445 cpuid_feature_extract_unsigned_field(u64 features, int field)
446 {
447 return cpuid_feature_extract_unsigned_field_width(features, field, 4);
448 }
449
/* Build the in-register mask for the field described by @ftrp. */
static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
{
	return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
}
454
455 static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
456 {
457 return (reg->user_val | (reg->sys_val & reg->user_mask));
458 }
459
460 static inline int __attribute_const__
461 cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign)
462 {
463 return (sign) ?
464 cpuid_feature_extract_signed_field_width(features, field, width) :
465 cpuid_feature_extract_unsigned_field_width(features, field, width);
466 }
467
468 static inline int __attribute_const__
469 cpuid_feature_extract_field(u64 features, int field, bool sign)
470 {
471 return cpuid_feature_extract_field_width(features, field, 4, sign);
472 }
473
/* Extract the (sign-aware) value of the field described by @ftrp from @val. */
static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
{
	return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign);
}
478
/*
 * Mixed-endian support at EL0 per ID_AA64MMFR0_EL1: true if either the
 * general BigEnd field or the EL0-only BigEndEL0 field reads 0b0001.
 */
static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
{
	return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 ||
		cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
}
484
485 static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
486 {
487 u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);
488
489 return val == ID_AA64PFR0_EL0_32BIT_64BIT;
490 }
491
492 static inline bool id_aa64pfr0_sve(u64 pfr0)
493 {
494 u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT);
495
496 return val > 0;
497 }
498
/* Finalise the system-wide capability state (boot-time only). */
void __init setup_cpu_features(void);
/* Detect/verify the calling CPU's capabilities against the system state. */
void check_local_cpu_capabilities(void);

/* Return the sanitised (system-wide safe) value of feature register @id. */
u64 read_sanitised_ftr_reg(u32 id);
503
/* Mixed-endian EL0 support of the *calling* CPU (reads its own MMFR0). */
static inline bool cpu_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}
508
/* Do all CPUs in the system support AArch32 at EL0? */
static inline bool system_supports_32bit_el0(void)
{
	return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
}
513
514 static inline bool system_supports_4kb_granule(void)
515 {
516 u64 mmfr0;
517 u32 val;
518
519 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
520 val = cpuid_feature_extract_unsigned_field(mmfr0,
521 ID_AA64MMFR0_TGRAN4_SHIFT);
522
523 return val == ID_AA64MMFR0_TGRAN4_SUPPORTED;
524 }
525
526 static inline bool system_supports_64kb_granule(void)
527 {
528 u64 mmfr0;
529 u32 val;
530
531 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
532 val = cpuid_feature_extract_unsigned_field(mmfr0,
533 ID_AA64MMFR0_TGRAN64_SHIFT);
534
535 return val == ID_AA64MMFR0_TGRAN64_SUPPORTED;
536 }
537
538 static inline bool system_supports_16kb_granule(void)
539 {
540 u64 mmfr0;
541 u32 val;
542
543 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
544 val = cpuid_feature_extract_unsigned_field(mmfr0,
545 ID_AA64MMFR0_TGRAN16_SHIFT);
546
547 return val == ID_AA64MMFR0_TGRAN16_SUPPORTED;
548 }
549
/* Mixed-endian EL0 support per the sanitised (system-wide) MMFR0. */
static inline bool system_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1));
}
554
555 static inline bool system_supports_mixed_endian(void)
556 {
557 u64 mmfr0;
558 u32 val;
559
560 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
561 val = cpuid_feature_extract_unsigned_field(mmfr0,
562 ID_AA64MMFR0_BIGENDEL_SHIFT);
563
564 return val == 0x1;
565 }
566
/* FP/SIMD is available unless the "no FPSIMD" capability was detected. */
static inline bool system_supports_fpsimd(void)
{
	return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
}
571
/*
 * Software TTBR0 PAN emulation is in use only when it is configured in
 * and the hardware PAN capability is absent.
 */
static inline bool system_uses_ttbr0_pan(void)
{
	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
		!cpus_have_const_cap(ARM64_HAS_PAN);
}
577
/* SVE usable: built in (CONFIG_ARM64_SVE) and detected on all CPUs. */
static inline bool system_supports_sve(void)
{
	return IS_ENABLED(CONFIG_ARM64_SVE) &&
		cpus_have_const_cap(ARM64_SVE);
}
583
/* Common-not-Private translations usable: configured and detected. */
static inline bool system_supports_cnp(void)
{
	return IS_ENABLED(CONFIG_ARM64_CNP) &&
		cpus_have_const_cap(ARM64_HAS_CNP);
}
589
/*
 * Pointer authentication for addresses: configured, and either the
 * architected or the implementation-defined algorithm is present.
 */
static inline bool system_supports_address_auth(void)
{
	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
		(cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
		 cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF));
}
596
/*
 * Pointer authentication for generic data: configured, and either the
 * architected or the implementation-defined algorithm is present.
 */
static inline bool system_supports_generic_auth(void)
{
	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
		(cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_ARCH) ||
		 cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF));
}
603
/* GIC priority masking (pseudo-NMI) in use: configured and detected. */
static inline bool system_uses_irq_prio_masking(void)
{
	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
		cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
}
609
/* Extra priority-mask debug checks: configured, and masking is in use. */
static inline bool system_has_prio_mask_debugging(void)
{
	return IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING) &&
		system_uses_irq_prio_masking();
}
615
/* Spectre-v2 branch-predictor hardening state (get_spectre_v2_workaround_state()). */
#define ARM64_BP_HARDEN_UNKNOWN -1
#define ARM64_BP_HARDEN_WA_NEEDED 0
#define ARM64_BP_HARDEN_NOT_REQUIRED 1

int get_spectre_v2_workaround_state(void);

/* Speculative Store Bypass Disable (SSBD) mitigation state (arm64_get_ssbd_state()). */
#define ARM64_SSBD_UNKNOWN -1
#define ARM64_SSBD_FORCE_DISABLE 0
#define ARM64_SSBD_KERNEL 1
#define ARM64_SSBD_FORCE_ENABLE 2
#define ARM64_SSBD_MITIGATED 3
627
/*
 * Current SSBD mitigation state (one of the ARM64_SSBD_* values).
 * Reports ARM64_SSBD_UNKNOWN when SSBD support is not built in.
 */
static inline int arm64_get_ssbd_state(void)
{
#ifdef CONFIG_ARM64_SSBD
	extern int ssbd_state;
	return ssbd_state;
#else
	return ARM64_SSBD_UNKNOWN;
#endif
}
637
638 void arm64_set_ssbd_mitigation(bool state);
639
640 extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
641
/*
 * Convert an ID_AA64MMFR0_EL1.PARange encoding to a physical address
 * width in bits.
 */
static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
{
	switch (parange) {
	case 0: return 32;
	case 1: return 36;
	case 2: return 40;
	case 3: return 42;
	case 4: return 44;
	case 5: return 48;
	case 6: return 52;
	/*
	 * A newer CPU could report an encoding this kernel does not
	 * know; fall back to the configured maximum PA size as the
	 * conservative choice.
	 */
	default: return CONFIG_ARM64_PA_BITS;
	}
}
662 #endif
663
664 #endif