/*
 * Context tracking: Probe on high level context boundaries such as kernel
 * and userspace. This includes syscall and exception entry/exit.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in userspace.
 *
 *  Started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 *
 */

#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/kprobes.h>

#define CREATE_TRACE_POINTS
#include <trace/events/context_tracking.h>

struct static_key context_tracking_enabled = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(context_tracking_enabled);

DEFINE_PER_CPU(struct context_tracking, context_tracking);
EXPORT_SYMBOL_GPL(context_tracking);

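/**
 * context_tracking_cpu_set - Activate the context tracking probes on a CPU
 * @cpu: CPU to mark as actively context tracked
 *
 * Mark the CPU as needing the user/kernel boundary probes and, on the first
 * such CPU, enable the context_tracking_enabled static key so that the
 * probes actually run.
 */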
void context_tracking_cpu_set(int cpu)
{
	if (!per_cpu(context_tracking.active, cpu)) {
		per_cpu(context_tracking.active, cpu) = true;
		static_key_slow_inc(&context_tracking_enabled);
	}
}
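
/*
 * Typical usage (sketch): the nohz_full setup code is expected to activate
 * the probes on each CPU of the nohz_full mask at boot time, roughly:
 *
 *	for_each_cpu(cpu, tick_nohz_full_mask)
 *		context_tracking_cpu_set(cpu);
 */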

/**
 * context_tracking_enter - Inform the context tracking that the CPU is going
 *                          to enter user or guest space mode.
 *
 * This function must be called right before we switch from the kernel
 * to user or guest space, when it's guaranteed that the remaining kernel
 * instructions to execute won't use any RCU read side critical section,
 * because this function puts RCU in an extended quiescent state.
 */
void context_tracking_enter(enum ctx_state state)
{
	unsigned long flags;

	/*
	 * Repeat the user_enter() check here because some archs may be calling
	 * this from asm and aren't able to use the inline static key check yet.
	 * If no CPU needs context tracking, they shouldn't go any further.
	 */
	if (!context_tracking_is_enabled())
		return;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to the following nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_enter() rcu_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	/* Kernel threads aren't supposed to go to userspace */
	WARN_ON_ONCE(!current->mm);

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.state) != state) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * At this stage, only low level arch entry code remains and
			 * then we'll run in userspace. We can assume there won't be
			 * any RCU read-side critical section until the next call to
			 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
			 * on the tick.
			 */
			if (state == CONTEXT_USER) {
				trace_user_enter(0);
				vtime_user_enter(current);
			}
			rcu_user_enter();
		}
		/*
		 * Even if context tracking is disabled on this CPU (because it's
		 * outside the full dynticks mask, for example), we still have to
		 * keep track of the context transitions and states, otherwise
		 * other CPUs may end up with an inconsistent view of them.
		 * If a task triggers an exception in userspace, sleeps in the
		 * exception handler and then migrates to another CPU, that new
		 * CPU must know what context the exception will return to by the
		 * time we call exception_exit().
		 * This information can only be provided by the previous CPU when
		 * it called exception_enter().
		 * OTOH we can spare the calls to vtime and RCU when
		 * context_tracking.active is false because we know that this CPU
		 * is not tickless.
		 */
		__this_cpu_write(context_tracking.state, state);
	}
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_enter);
EXPORT_SYMBOL_GPL(context_tracking_enter);

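/*
 * Illustrative sketch (the arch function and helper below are made up, real
 * entry code differs per architecture): the return-to-userspace path is
 * expected to call user_enter() -- and thus
 * context_tracking_enter(CONTEXT_USER) -- as its last high level step, once
 * nothing that may use RCU remains:
 *
 *	static void arch_prepare_return_to_user(void)
 *	{
 *		do_notify_resume_work();	// signals etc, may still use RCU
 *		user_enter();			// RCU enters extended quiescent state
 *		// only low level register restore runs after this point
 *	}
 */

/*
 * Simple wrapper that enters CONTEXT_USER, provided so that architecture
 * entry code can probe the user boundary without passing a state argument.
 */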
void context_tracking_user_enter(void)
{
	context_tracking_enter(CONTEXT_USER);
}
NOKPROBE_SYMBOL(context_tracking_user_enter);

/**
 * context_tracking_exit - Inform the context tracking that the CPU is
 *                         exiting user or guest mode and entering the kernel.
 *
 * This function must be called after we entered the kernel from user or
 * guest space, before any use of RCU read side critical sections. This
 * potentially includes any high level kernel code like syscalls, exceptions,
 * signal handling, etc...
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void context_tracking_exit(enum ctx_state state)
{
	unsigned long flags;

	if (!context_tracking_is_enabled())
		return;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.state) == state) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * We are going to run code that may use RCU. Inform
			 * RCU core about that (i.e. we may need the tick again).
			 */
			rcu_user_exit();
			if (state == CONTEXT_USER) {
				vtime_user_exit(current);
				trace_user_exit(0);
			}
		}
		__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
	}
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_exit);
EXPORT_SYMBOL_GPL(context_tracking_exit);

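/*
 * Illustrative sketch: thanks to the re-entrancy of context_tracking_exit(),
 * an exception handler can simply bracket its body with the exception_enter()
 * and exception_exit() helpers, which end up in context_tracking_exit() and
 * context_tracking_enter(), without knowing whether the exception came from
 * userspace or from the kernel (handle_the_fault() below is a made up name):
 *
 *	enum ctx_state prev_state = exception_enter();
 *
 *	handle_the_fault();
 *
 *	exception_exit(prev_state);
 */

/*
 * Simple wrapper that exits CONTEXT_USER, the counterpart of
 * context_tracking_user_enter() above.
 */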
void context_tracking_user_exit(void)
{
	context_tracking_exit(CONTEXT_USER);
}
NOKPROBE_SYMBOL(context_tracking_user_exit);

/**
 * __context_tracking_task_switch - context switch the syscall callbacks
 * @prev: the task that is being switched out
 * @next: the task that is being switched in
 *
 * Context tracking uses the syscall slow path to implement its user-kernel
 * boundary probes on syscalls. This way it doesn't impact the syscall fast
 * path on CPUs that don't do context tracking.
 *
 * But we need to clear the flag on the previous task because it may later
 * migrate to some CPU that doesn't do context tracking. As such the TIF
 * flag may not be desired there.
 */
void __context_tracking_task_switch(struct task_struct *prev,
				    struct task_struct *next)
{
	clear_tsk_thread_flag(prev, TIF_NOHZ);
	set_tsk_thread_flag(next, TIF_NOHZ);
}
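
/*
 * Illustrative sketch (arch details vary): the syscall entry work code is
 * expected to test the flag set above and only then take the tracked slow
 * path, along the lines of:
 *
 *	if (test_thread_flag(TIF_NOHZ))
 *		user_exit();	// context tracking probe on syscall entry
 */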

#ifdef CONFIG_CONTEXT_TRACKING_FORCE
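/*
 * CONFIG_CONTEXT_TRACKING_FORCE activates the probes on every possible CPU
 * at boot, which is mostly useful to exercise the context tracking paths
 * without a real nohz_full= setup.
 */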
void __init context_tracking_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		context_tracking_cpu_set(cpu);
}
#endif