arch/x86/include/asm/irqflags.h


DEFINITIONS

This source file includes the following definitions:
  1. native_save_fl
  2. native_restore_fl
  3. native_irq_disable
  4. native_irq_enable
  5. native_safe_halt
  6. native_halt
  7. arch_local_save_flags
  8. arch_local_irq_restore
  9. arch_local_irq_disable
  10. arch_local_irq_enable
  11. arch_safe_halt
  12. halt
  13. arch_local_irq_save
  14. arch_irqs_disabled_flags
  15. arch_irqs_disabled

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

#include <asm/nospec-branch.h>

/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))

/*
 * Interrupt control:
 */

/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
extern inline unsigned long native_save_fl(void)
{
        unsigned long flags;

        /*
         * "=rm" is safe here, because "pop" adjusts the stack before
         * it evaluates its effective address -- this is part of the
         * documented behavior of the "pop" instruction.
         */
        asm volatile("# __raw_save_flags\n\t"
                     "pushf ; pop %0"
                     : "=rm" (flags)
                     : /* no input */
                     : "memory");

        return flags;
}
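
/*
 * Illustrative sketch (not part of this header): the value returned by
 * native_save_fl() is the raw EFLAGS image, so the interrupt-enable
 * state is simply bit 9, X86_EFLAGS_IF from <asm/processor-flags.h>.
 * The helper name is hypothetical.
 */
static inline int irqs_enabled_sketch(void)
{
        return !!(native_save_fl() & X86_EFLAGS_IF);
}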

extern inline void native_restore_fl(unsigned long flags);
extern inline void native_restore_fl(unsigned long flags)
{
        asm volatile("push %0 ; popf"
                     : /* no output */
                     :"g" (flags)
                     :"memory", "cc");
}

static inline void native_irq_disable(void)
{
        asm volatile("cli": : :"memory");
}

static inline void native_irq_enable(void)
{
        asm volatile("sti": : :"memory");
}

static inline __cpuidle void native_safe_halt(void)
{
        mds_idle_clear_cpu_buffers();
        /*
         * "sti" keeps interrupts masked for one more instruction (the
         * STI interrupt shadow), so "hlt" is reached before any pending
         * interrupt can be delivered: a wakeup cannot slip in between
         * the two instructions.
         */
        asm volatile("sti; hlt": : :"memory");
}

static inline __cpuidle void native_halt(void)
{
        mds_idle_clear_cpu_buffers();
        asm volatile("hlt": : :"memory");
}

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

static inline notrace unsigned long arch_local_save_flags(void)
{
        return native_save_fl();
}

static inline notrace void arch_local_irq_restore(unsigned long flags)
{
        native_restore_fl(flags);
}

static inline notrace void arch_local_irq_disable(void)
{
        native_irq_disable();
}

static inline notrace void arch_local_irq_enable(void)
{
        native_irq_enable();
}

/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 */
static inline __cpuidle void arch_safe_halt(void)
{
        native_safe_halt();
}
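
/*
 * Illustrative sketch (not part of this header), assuming a simplified
 * idle loop and a hypothetical idle_work_pending() predicate.  Work is
 * checked with interrupts disabled; arch_safe_halt() then re-enables
 * them atomically with the halt (via sti's one-instruction interrupt
 * shadow), so a wakeup interrupt arriving after the check cannot be
 * lost before the CPU sleeps.
 */
extern bool idle_work_pending(void);    /* hypothetical */

static inline void idle_loop_sketch(void)
{
        for (;;) {
                arch_local_irq_disable();
                if (idle_work_pending()) {
                        arch_local_irq_enable();
                        break;
                }
                arch_safe_halt();       /* resumes here with IRQs on */
        }
}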

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline __cpuidle void halt(void)
{
        native_halt();
}

/*
 * For spinlocks, etc:
 */
static inline notrace unsigned long arch_local_irq_save(void)
{
        unsigned long flags = arch_local_save_flags();
        arch_local_irq_disable();
        return flags;
}
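
/*
 * Illustrative sketch (not part of this header): the canonical pairing
 * of arch_local_irq_save()/arch_local_irq_restore() around a short
 * critical section.  Restoring the saved flags, rather than forcing
 * "sti", keeps the code correct even when the caller already ran with
 * interrupts disabled.  touch_percpu_data() is hypothetical.
 */
extern void touch_percpu_data(void);    /* hypothetical */

static inline void critical_section_sketch(void)
{
        unsigned long flags;

        flags = arch_local_irq_save();  /* save EFLAGS, then cli */
        touch_percpu_data();
        arch_local_irq_restore(flags);  /* IF restored to prior state */
}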
#else

#define ENABLE_INTERRUPTS(x)    sti
#define DISABLE_INTERRUPTS(x)   cli

#ifdef CONFIG_X86_64
#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(x)           pushfq; popq %rax
#endif

#define SWAPGS  swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK     swapgs

#define INTERRUPT_RETURN        jmp native_iret
#define USERGS_SYSRET64                         \
        swapgs;                                 \
        sysretq;
#define USERGS_SYSRET32                         \
        swapgs;                                 \
        sysretl

#else
#define INTERRUPT_RETURN                iret
#endif

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT_XXL */

#ifndef __ASSEMBLY__
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
        return !(flags & X86_EFLAGS_IF);
}

static inline int arch_irqs_disabled(void)
{
        unsigned long flags = arch_local_save_flags();

        return arch_irqs_disabled_flags(flags);
}
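
/*
 * Illustrative sketch (not part of this header): the flags word
 * returned by arch_local_irq_save() can be handed to
 * arch_irqs_disabled_flags() to learn whether the caller originally
 * ran with interrupts enabled.  The helper name is hypothetical.
 */
static inline int irqs_were_enabled_sketch(void)
{
        unsigned long flags = arch_local_irq_save();
        int was_enabled = !arch_irqs_disabled_flags(flags);

        arch_local_irq_restore(flags);
        return was_enabled;
}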
#endif /* !__ASSEMBLY__ */

#ifdef __ASSEMBLY__
#ifdef CONFIG_TRACE_IRQFLAGS
#  define TRACE_IRQS_ON         call trace_hardirqs_on_thunk;
#  define TRACE_IRQS_OFF        call trace_hardirqs_off_thunk;
#else
#  define TRACE_IRQS_ON
#  define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#  ifdef CONFIG_X86_64
#    define LOCKDEP_SYS_EXIT            call lockdep_sys_exit_thunk
#    define LOCKDEP_SYS_EXIT_IRQ \
        TRACE_IRQS_ON; \
        sti; \
        call lockdep_sys_exit_thunk; \
        cli; \
        TRACE_IRQS_OFF;
#  else
#    define LOCKDEP_SYS_EXIT \
        pushl %eax;                             \
        pushl %ecx;                             \
        pushl %edx;                             \
        call lockdep_sys_exit;                  \
        popl %edx;                              \
        popl %ecx;                              \
        popl %eax;
#    define LOCKDEP_SYS_EXIT_IRQ
#  endif
#else
#  define LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ
#endif
#endif /* __ASSEMBLY__ */

#endif /* _X86_IRQFLAGS_H_ */
