root/arch/x86/include/asm/switch_to.h

DEFINITIONS

This source file includes the following definitions:
  1. prepare_switch_to
  2. refresh_sysenter_cs
  3. update_task_stack

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SWITCH_TO_H
#define _ASM_X86_SWITCH_TO_H

#include <linux/sched/task_stack.h>

struct task_struct; /* one of the stranger aspects of C forward declarations */

struct task_struct *__switch_to_asm(struct task_struct *prev,
                                    struct task_struct *next);

__visible struct task_struct *__switch_to(struct task_struct *prev,
                                          struct task_struct *next);

/* This runs on the previous thread's stack. */
static inline void prepare_switch_to(struct task_struct *next)
{
#ifdef CONFIG_VMAP_STACK
        /*
         * If we switch to a stack that has a top-level paging entry
         * that is not present in the current mm, the resulting #PF
         * will be promoted to a double-fault and we'll panic.  Probe
         * the new stack now so that vmalloc_fault can fix up the page
         * tables if needed.  This can only happen if we use a stack
         * in vmap space.
         *
         * We assume that the stack is aligned so that it never spans
         * more than one top-level paging entry.
         *
         * To minimize cache pollution, just follow the stack pointer.
         */
        READ_ONCE(*(unsigned char *)next->thread.sp);
#endif
}
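/*
 * A minimal, hypothetical illustration of the alignment assumption above
 * (example_vmap_stack_probe_assumptions() is not part of this header): the
 * stack is THREAD_ALIGN-aligned and THREAD_SIZE bytes long, while one
 * top-level paging entry covers PGDIR_SIZE bytes, so an aligned stack can
 * never straddle two top-level entries and a single probe through ->sp is
 * enough to populate the page tables.
 */
static inline void example_vmap_stack_probe_assumptions(void)
{
#ifdef CONFIG_VMAP_STACK
        /* The whole stack fits within the range of one top-level entry. */
        BUILD_BUG_ON(THREAD_SIZE > PGDIR_SIZE);
        /* Alignment keeps the stack from straddling an entry boundary. */
        BUILD_BUG_ON(THREAD_ALIGN < THREAD_SIZE);
#endif
}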

asmlinkage void ret_from_fork(void);

/*
 * This is the structure pointed to by thread.sp for an inactive task.  The
 * order of the fields must match the code in __switch_to_asm().
 */
struct inactive_task_frame {
#ifdef CONFIG_X86_64
        unsigned long r15;
        unsigned long r14;
        unsigned long r13;
        unsigned long r12;
#else
        unsigned long flags;
        unsigned long si;
        unsigned long di;
#endif
        unsigned long bx;

        /*
         * These two fields must be together.  They form a stack frame header,
         * needed by get_frame_pointer().
         */
        unsigned long bp;
        unsigned long ret_addr;
};

struct fork_frame {
        struct inactive_task_frame frame;
        struct pt_regs regs;
};
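
/*
 * Sketch of how these structures are used at fork time, paraphrased from
 * copy_thread() in arch/x86/kernel/process.c (example_setup_fork_frame() is
 * a hypothetical name and the field setup is simplified; details vary by
 * kernel version): the child's thread.sp is pointed at a fork_frame sitting
 * just below its pt_regs, so the first __switch_to_asm() into the child pops
 * the callee-saved registers and "returns" to ret_from_fork().
 */
static inline void example_setup_fork_frame(struct task_struct *p)
{
        struct pt_regs *childregs = task_pt_regs(p);
        struct fork_frame *fork_frame = container_of(childregs,
                                                     struct fork_frame, regs);
        struct inactive_task_frame *frame = &fork_frame->frame;

        frame->bp       = 0;    /* terminate the frame-pointer chain; the real
                                 * code may encode childregs here instead */
        frame->ret_addr = (unsigned long) ret_from_fork;
        p->thread.sp    = (unsigned long) fork_frame;
}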

#define switch_to(prev, next, last)                                     \
do {                                                                    \
        prepare_switch_to(next);                                        \
                                                                        \
        ((last) = __switch_to_asm((prev), (next)));                     \
} while (0)
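
/*
 * Condensed sketch of the call site (paraphrasing context_switch() in
 * kernel/sched/core.c; example_context_switch() is a hypothetical name and
 * the scheduler bookkeeping is elided): 'prev' is passed as 'last' so that,
 * once we are running on next's stack, the code after the macro still knows
 * which task the CPU actually switched away from.
 */
static inline void example_context_switch(struct task_struct *prev,
                                          struct task_struct *next)
{
        /* ... mm switch and runqueue bookkeeping elided ... */
        switch_to(prev, next, prev);
        /* Running as 'next' now; 'prev' names the task we just left. */
}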

#ifdef CONFIG_X86_32
static inline void refresh_sysenter_cs(struct thread_struct *thread)
{
        /* Only happens when SEP is enabled, no need to test "SEP"arately: */
        if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
                return;

        this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
        wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
}
#endif
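
#ifdef CONFIG_X86_32
/*
 * Illustrative use of the helper above, paraphrased from the vm86 paths in
 * arch/x86/kernel/vm86_32.c (example_vm86_disable_sysenter() is a
 * hypothetical name): entering vm86 mode clears the per-thread SYSENTER CS
 * and pushes the change to the MSR via refresh_sysenter_cs().
 */
static inline void example_vm86_disable_sysenter(struct task_struct *tsk)
{
        if (static_cpu_has(X86_FEATURE_SEP)) {
                tsk->thread.sysenter_cs = 0;
                refresh_sysenter_cs(&tsk->thread);
        }
}
#endif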

/* This is used when switching tasks or entering/exiting vm86 mode. */
static inline void update_task_stack(struct task_struct *task)
{
        /* sp0 always points to the entry trampoline stack, which is constant: */
#ifdef CONFIG_X86_32
        if (static_cpu_has(X86_FEATURE_XENPV))
                load_sp0(task->thread.sp0);
        else
                this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0);
#else
        /*
         * x86-64 updates x86_tss.sp1 via cpu_current_top_of_stack. That
         * doesn't work on x86-32 because sp1 and cpu_current_top_of_stack
         * have different values (because of the non-zero stack-padding
         * on 32bit).
         */
        if (static_cpu_has(X86_FEATURE_XENPV))
                load_sp0(task_top_of_stack(task));
#endif
}
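
#ifdef CONFIG_X86_32
/*
 * Hypothetical helper showing the rule implied by the vm86 case mentioned
 * above (example_set_thread_sp0() is not a real kernel function; the real
 * code lives in arch/x86/kernel/vm86_32.c): whenever thread.sp0 changes,
 * update_task_stack() must be called so the CPU's entry state (the TSS, or
 * the hypervisor under Xen PV) sees the new value.
 */
static inline void example_set_thread_sp0(struct task_struct *tsk,
                                          unsigned long new_sp0)
{
        tsk->thread.sp0 = new_sp0;
        update_task_stack(tsk);
}
#endif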

#endif /* _ASM_X86_SWITCH_TO_H */
