root/arch/x86/include/asm/cpu_entry_area.h

DEFINITIONS

This source file includes the following definitions:
  1. cpu_entry_stack

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_CPU_ENTRY_AREA_H
#define _ASM_X86_CPU_ENTRY_AREA_H

#include <linux/percpu-defs.h>
#include <asm/processor.h>
#include <asm/intel_ds.h>

#ifdef CONFIG_X86_64

/* Macro to enforce the same ordering and stack sizes */
#define ESTACKS_MEMBERS(guardsize, db2_holesize)\
        char    DF_stack_guard[guardsize];      \
        char    DF_stack[EXCEPTION_STKSZ];      \
        char    NMI_stack_guard[guardsize];     \
        char    NMI_stack[EXCEPTION_STKSZ];     \
        char    DB2_stack_guard[guardsize];     \
        char    DB2_stack[db2_holesize];        \
        char    DB1_stack_guard[guardsize];     \
        char    DB1_stack[EXCEPTION_STKSZ];     \
        char    DB_stack_guard[guardsize];      \
        char    DB_stack[EXCEPTION_STKSZ];      \
        char    MCE_stack_guard[guardsize];     \
        char    MCE_stack[EXCEPTION_STKSZ];     \
        char    IST_top_guard[guardsize];       \

/* The exception stacks' physical storage. No guard pages required */
struct exception_stacks {
        ESTACKS_MEMBERS(0, 0)
};

/* The effective cpu entry area mapping with guard pages. */
struct cea_exception_stacks {
        ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ)
};
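
/*
 * Illustrative sketch, not part of the original header: funnelling both
 * structs through the single ESTACKS_MEMBERS() macro guarantees that the
 * packed backing store and the guard-padded mapping can never drift apart
 * in member order or stack size. A hypothetical two-stack version shows
 * how the guardsize parameter alone turns one layout into the other:
 */
#define DEMO_ESTACKS_MEMBERS(guardsize)         \
        char    A_stack_guard[guardsize];       \
        char    A_stack[EXCEPTION_STKSZ];       \
        char    B_stack_guard[guardsize];       \
        char    B_stack[EXCEPTION_STKSZ];

/* Guards collapse to zero bytes: plain contiguous backing storage. */
struct demo_storage { DEMO_ESTACKS_MEMBERS(0) };

/* One unmapped guard page precedes each stack in the virtual mapping. */
struct demo_mapping { DEMO_ESTACKS_MEMBERS(PAGE_SIZE) };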
  37 
  38 /*
  39  * The exception stack ordering in [cea_]exception_stacks
  40  */
  41 enum exception_stack_ordering {
  42         ESTACK_DF,
  43         ESTACK_NMI,
  44         ESTACK_DB2,
  45         ESTACK_DB1,
  46         ESTACK_DB,
  47         ESTACK_MCE,
  48         N_EXCEPTION_STACKS
  49 };
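
/*
 * Illustrative sketch, not part of the original header: since
 * ESTACKS_MEMBERS() fixes the member order, the enum doubles as a stable
 * index for per-stack metadata. A hypothetical name table, similar in
 * spirit to what stack-dump code keeps, could be indexed like this:
 */
static const char * const demo_estack_names[N_EXCEPTION_STACKS] = {
        [ESTACK_DF]     = "#DF",
        [ESTACK_NMI]    = "NMI",
        [ESTACK_DB2]    = "#DB2",
        [ESTACK_DB1]    = "#DB1",
        [ESTACK_DB]     = "#DB",
        [ESTACK_MCE]    = "#MC",
};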

#define CEA_ESTACK_SIZE(st)                                     \
        sizeof(((struct cea_exception_stacks *)0)->st## _stack)

#define CEA_ESTACK_BOT(ceastp, st)                              \
        ((unsigned long)&(ceastp)->st## _stack)

#define CEA_ESTACK_TOP(ceastp, st)                              \
        (CEA_ESTACK_BOT(ceastp, st) + CEA_ESTACK_SIZE(st))

#define CEA_ESTACK_OFFS(st)                                     \
        offsetof(struct cea_exception_stacks, st## _stack)

#define CEA_ESTACK_PAGES                                        \
        (sizeof(struct cea_exception_stacks) / PAGE_SIZE)

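/*
 * Usage sketch, not part of the original header: CEA_ESTACK_BOT() yields
 * the lowest address of a stack and CEA_ESTACK_TOP() points one byte past
 * its end, so a hypothetical helper counting the pages behind the NMI
 * stack reduces to:
 */
static inline unsigned long demo_nmi_stack_pages(struct cea_exception_stacks *cea)
{
        unsigned long bot = CEA_ESTACK_BOT(cea, NMI);
        unsigned long top = CEA_ESTACK_TOP(cea, NMI);

        /* Equivalent to CEA_ESTACK_SIZE(NMI) / PAGE_SIZE by construction. */
        return (top - bot) / PAGE_SIZE;
}
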
#endif

/*
 * cpu_entry_area is a percpu region that contains things needed by the CPU
 * and early entry/exit code.  Real types aren't used for all fields here
 * to avoid circular header dependencies.
 *
 * Every field is a virtual alias of some other allocated backing store.
 * There is no direct allocation of a struct cpu_entry_area.
 */
struct cpu_entry_area {
        char gdt[PAGE_SIZE];

        /*
         * The GDT is just below entry_stack and thus serves (on x86_64) as
         * a read-only guard page. On 32-bit the GDT must be writeable, so
         * it needs an extra guard page.
         */
#ifdef CONFIG_X86_32
        char guard_entry_stack[PAGE_SIZE];
#endif
        struct entry_stack_page entry_stack_page;

        /*
         * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
         * we need task switches to work, and task switches write to the TSS.
         */
        struct tss_struct tss;

#ifdef CONFIG_X86_64
        /*
         * Exception stacks used for IST entries with guard pages.
         */
        struct cea_exception_stacks estacks;
#endif
        /*
         * Per CPU debug store for Intel performance monitoring. Wastes a
         * full page at the moment.
         */
        struct debug_store cpu_debug_store;
        /*
         * The actual PEBS/BTS buffers must be mapped to user space.
         * Reserve enough fixmap PTEs.
         */
        struct debug_store_buffers cpu_debug_buffers;
};

#define CPU_ENTRY_AREA_SIZE             (sizeof(struct cpu_entry_area))
#define CPU_ENTRY_AREA_ARRAY_SIZE       (CPU_ENTRY_AREA_SIZE * NR_CPUS)

/* Total size includes the readonly IDT mapping page as well: */
#define CPU_ENTRY_AREA_TOTAL_SIZE       (CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)

DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);

extern void setup_cpu_entry_areas(void);
extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
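
/*
 * Sketch of the aliasing scheme described at struct cpu_entry_area above;
 * not part of the original header, and the real mapping code lives in
 * arch/x86/mm/cpu_entry_area.c. Fields become visible by pointing cpu
 * entry area PTEs at pages allocated elsewhere, roughly like this
 * hypothetical helper:
 */
static inline void demo_alias_pages(void *cea_vaddr, void *backing,
                                    int pages, pgprot_t prot)
{
        for ( ; pages > 0; pages--, cea_vaddr += PAGE_SIZE, backing += PAGE_SIZE)
                cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(backing), prot);
}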
 124 
 125 /* Single page reserved for the readonly IDT mapping: */
 126 #define CPU_ENTRY_AREA_RO_IDT           CPU_ENTRY_AREA_BASE
 127 #define CPU_ENTRY_AREA_PER_CPU          (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)
 128 
 129 #define CPU_ENTRY_AREA_RO_IDT_VADDR     ((void *)CPU_ENTRY_AREA_RO_IDT)
 130 
 131 #define CPU_ENTRY_AREA_MAP_SIZE                 \
 132         (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
 133 
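/*
 * Sketch, not part of the original header: given the fixed layout above,
 * looking up a CPU's entry area can be plain address arithmetic on the
 * per-CPU array base (the real get_cpu_entry_area() is defined in
 * arch/x86/mm/cpu_entry_area.c):
 */
static inline struct cpu_entry_area *demo_get_cpu_entry_area(int cpu)
{
        return (struct cpu_entry_area *)(CPU_ENTRY_AREA_PER_CPU +
                                         cpu * CPU_ENTRY_AREA_SIZE);
}
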
extern struct cpu_entry_area *get_cpu_entry_area(int cpu);

static inline struct entry_stack *cpu_entry_stack(int cpu)
{
        return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
}

#define __this_cpu_ist_top_va(name)                                     \
        CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name)

#endif
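
/*
 * Usage sketch, not part of the original header: on x86_64, entry and
 * unwinder code can combine __this_cpu_ist_top_va() with CEA_ESTACK_SIZE()
 * to test whether a stack pointer lies on the current CPU's NMI IST stack.
 * The helper below is hypothetical:
 */
static inline bool demo_on_nmi_ist_stack(unsigned long sp)
{
        unsigned long top = __this_cpu_ist_top_va(NMI);

        /* Valid stack addresses live in [top - size, top). */
        return sp >= top - CEA_ESTACK_SIZE(NMI) && sp < top;
}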
