kernel/context_tracking.c


DEFINITIONS

This source file includes the following definitions.
  1. context_tracking_recursion_enter
  2. context_tracking_recursion_exit
  3. __context_tracking_enter
  4. context_tracking_enter
  5. context_tracking_user_enter
  6. __context_tracking_exit
  7. context_tracking_exit
  8. context_tracking_user_exit
  9. context_tracking_cpu_set
  10. context_tracking_init

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Context tracking: Probe on high level context boundaries such as kernel
 * and userspace. This includes syscalls and exceptions entry/exit.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in userspace.
 *
 *  Started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 *
 */

#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/kprobes.h>

#define CREATE_TRACE_POINTS
#include <trace/events/context_tracking.h>

DEFINE_STATIC_KEY_FALSE(context_tracking_enabled);
EXPORT_SYMBOL_GPL(context_tracking_enabled);

DEFINE_PER_CPU(struct context_tracking, context_tracking);
EXPORT_SYMBOL_GPL(context_tracking);
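
/*
 * Usage sketch (illustrative, not part of this file): the static key above
 * lets hot paths skip context tracking entirely until at least one CPU
 * enables it. The inline helpers in <linux/context_tracking.h> test the key
 * before calling into this file, along the lines of:
 *
 *	static inline void user_enter(void)
 *	{
 *		if (static_branch_unlikely(&context_tracking_enabled))
 *			context_tracking_enter(CONTEXT_USER);
 *	}
 *
 * (Exact helper names and guards vary across kernel versions.)
 */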

/*
 * Guard against recursion: an exception or instrumentation hook triggered
 * from the functions called below could otherwise re-enter context
 * tracking on this CPU.
 */
static bool context_tracking_recursion_enter(void)
{
	int recursion;

	recursion = __this_cpu_inc_return(context_tracking.recursion);
	if (recursion == 1)
		return true;

	WARN_ONCE((recursion < 1), "Invalid context tracking recursion value %d\n", recursion);
	__this_cpu_dec(context_tracking.recursion);

	return false;
}

static void context_tracking_recursion_exit(void)
{
	__this_cpu_dec(context_tracking.recursion);
}

/**
 * context_tracking_enter - Inform the context tracking that the CPU is going
 *                          to enter user or guest space mode.
 *
 * This function must be called right before we switch from the kernel
 * to user or guest space, when it's guaranteed that the remaining kernel
 * instructions to execute won't use any RCU read side critical section,
 * because this function puts RCU in an extended quiescent state.
 */
void __context_tracking_enter(enum ctx_state state)
{
	/* Kernel threads aren't supposed to go to userspace */
	WARN_ON_ONCE(!current->mm);

	if (!context_tracking_recursion_enter())
		return;

	if (__this_cpu_read(context_tracking.state) != state) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * At this stage, only low level arch entry code remains and
			 * then we'll run in userspace. We can assume there won't be
			 * any RCU read-side critical section until the next call to
			 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
			 * on the tick.
			 */
			if (state == CONTEXT_USER) {
				trace_user_enter(0);
				vtime_user_enter(current);
			}
			rcu_user_enter();
		}
		/*
		 * Even if context tracking is disabled on this CPU, because it's outside
		 * the full dynticks mask for example, we still have to keep track of the
		 * context transitions and states to stay consistent with the other CPUs.
		 * If a task triggers an exception in userspace, sleeps in the exception
		 * handler and then migrates to another CPU, that new CPU must know where
		 * the exception returns to by the time we call exception_exit().
		 * This information can only be provided by the previous CPU when it called
		 * exception_enter().
		 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
		 * is false because we know that CPU is not tickless.
		 */
		__this_cpu_write(context_tracking.state, state);
	}
	context_tracking_recursion_exit();
}
NOKPROBE_SYMBOL(__context_tracking_enter);
EXPORT_SYMBOL_GPL(__context_tracking_enter);
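
/*
 * Call-site sketch (illustrative; exit_to_user_mode() is a hypothetical
 * name, real arch hooks differ): architectures invoke user_enter(), which
 * funnels into __context_tracking_enter(CONTEXT_USER) when the static key
 * is on, as the last high level step before returning to userspace:
 *
 *	static void exit_to_user_mode(void)
 *	{
 *		... last pieces of kernel work, no RCU read side below ...
 *		user_enter();
 *		... low level register restore and return to user ...
 *	}
 *
 * The guest side is analogous: KVM style code brackets vCPU execution with
 * the CONTEXT_GUEST state.
 */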

void context_tracking_enter(enum ctx_state state)
{
	unsigned long flags;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to that nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_enter() rcu_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	local_irq_save(flags);
	__context_tracking_enter(state);
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_enter);
EXPORT_SYMBOL_GPL(context_tracking_enter);
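
/*
 * Sketch of the nesting that the in_interrupt() check above avoids
 * (assuming an exception taken from an IRQ handler that interrupted
 * userspace):
 *
 *	irq entry:          rcu_irq_enter()
 *	exception entry:      context_tracking_exit()  -> rcu_user_exit()
 *	exception exit:       context_tracking_enter() -> rcu_user_enter()
 *	irq exit:           rcu_irq_exit()
 *
 * The inner user exit/enter pair would corrupt RCU's dynticks nesting
 * count, and the rcu_irq_*() pair already protects RCU use inside the
 * IRQ, so we simply bail out.
 */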

/*
 * Out-of-line wrapper around user_enter(), presumably kept so that low
 * level arch entry code (including assembly, which can't use the inline
 * helper) has a plain symbol to call. Its counterpart is
 * context_tracking_user_exit() below.
 */
void context_tracking_user_enter(void)
{
	user_enter();
}
NOKPROBE_SYMBOL(context_tracking_user_enter);
/**
 * context_tracking_exit - Inform the context tracking that the CPU is
 *                         exiting user or guest mode and entering the kernel.
 *
 * This function must be called after we have entered the kernel from user
 * or guest space and before any use of an RCU read side critical section.
 * This potentially includes any high level kernel code like syscalls,
 * exceptions, signal handling, etc...
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void __context_tracking_exit(enum ctx_state state)
{
	if (!context_tracking_recursion_enter())
		return;

	if (__this_cpu_read(context_tracking.state) == state) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * We are going to run code that may use RCU. Inform
			 * RCU core about that (ie: we may need the tick again).
			 */
			rcu_user_exit();
			if (state == CONTEXT_USER) {
				vtime_user_exit(current);
				trace_user_exit(0);
			}
		}
		__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
	}
	context_tracking_recursion_exit();
}
NOKPROBE_SYMBOL(__context_tracking_exit);
EXPORT_SYMBOL_GPL(__context_tracking_exit);
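
/*
 * Usage sketch (illustrative): the re-entrancy documented above is what the
 * inline exception_enter()/exception_exit() helpers in
 * <linux/context_tracking.h> rely on to bracket exception handlers without
 * knowing the previous context, roughly:
 *
 *	enum ctx_state prev = exception_enter();   exits user context if needed
 *	... handle the exception, may sleep and migrate ...
 *	exception_exit(prev);                      re-enters the saved context
 *
 * This is also why __context_tracking_enter() records the state even on
 * inactive CPUs: the prev state that exception_enter() read on the old CPU
 * is what exception_exit() replays, possibly on a different CPU.
 */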

void context_tracking_exit(enum ctx_state state)
{
	unsigned long flags;

	/* See the IRQ nesting comment in context_tracking_enter() */
	if (in_interrupt())
		return;

	local_irq_save(flags);
	__context_tracking_exit(state);
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_exit);
EXPORT_SYMBOL_GPL(context_tracking_exit);

/* Out-of-line wrapper around user_exit(), see context_tracking_user_enter() above. */
void context_tracking_user_exit(void)
{
	user_exit();
}
NOKPROBE_SYMBOL(context_tracking_user_exit);

void __init context_tracking_cpu_set(int cpu)
{
	static __initdata bool initialized = false;

	if (!per_cpu(context_tracking.active, cpu)) {
		per_cpu(context_tracking.active, cpu) = true;
		static_branch_inc(&context_tracking_enabled);
	}

	if (initialized)
		return;

	/*
	 * Set TIF_NOHZ to init/0 and let it propagate to all tasks through fork.
	 * This assumes that init is the only task at this early boot stage.
	 */
	set_tsk_thread_flag(&init_task, TIF_NOHZ);
	WARN_ON_ONCE(!tasklist_empty());

	initialized = true;
}
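
/*
 * Caller sketch (illustrative; the exact call site varies across kernel
 * versions): with NO_HZ_FULL, the tick code enables context tracking for
 * each full dynticks CPU during boot, along the lines of tick_nohz_init():
 *
 *	for_each_cpu(cpu, tick_nohz_full_mask)
 *		context_tracking_cpu_set(cpu);
 */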

#ifdef CONFIG_CONTEXT_TRACKING_FORCE
void __init context_tracking_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		context_tracking_cpu_set(cpu);
}
#endif
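
/*
 * Note: CONFIG_CONTEXT_TRACKING_FORCE enables the probes on all CPUs even
 * without a nohz_full= boot parameter, which is mainly useful to stress
 * test the context tracking paths on ordinary configurations.
 */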
