This source file includes the following definitions:
- rseq_update_cpu_id
- rseq_reset_rseq_cpu_id
- rseq_get_rseq_cs
- rseq_need_restart
- clear_rseq_cs
- in_rseq_cs
- rseq_ip_fixup
- __rseq_handle_notify_resume
- rseq_syscall
- SYSCALL_DEFINE4
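These functions operate on the user-space rseq ABI declared in include/uapi/linux/rseq.h. As a reading aid, here is a simplified sketch of those structures, assuming the layout described by that header; the real definitions use a union for the rseq_cs pointer and carry additional compat details.

/* Simplified sketch of the rseq UAPI structures (illustrative only). */
struct rseq_cs {
        __u32 version;            /* must be 0, checked by rseq_get_rseq_cs() */
        __u32 flags;              /* RSEQ_CS_FLAG_NO_RESTART_ON_* bits */
        __u64 start_ip;           /* first instruction of the critical section */
        __u64 post_commit_offset; /* length of the critical section, in bytes */
        __u64 abort_ip;           /* abort handler, outside the section */
} __attribute__((aligned(4 * sizeof(__u64))));

struct rseq {
        __u32 cpu_id_start;       /* kept up to date by rseq_update_cpu_id() */
        __u32 cpu_id;             /* reset to RSEQ_CPU_ID_UNINITIALIZED on unregister */
        __u64 rseq_cs;            /* user pointer to the active struct rseq_cs,
                                     accessed as rseq_cs.ptr64 in this file */
        __u32 flags;              /* RSEQ_CS_FLAG_NO_RESTART_ON_* bits */
} __attribute__((aligned(4 * sizeof(__u64))));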
/*
 * Restartable sequences system call
 */
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/rseq.h>
#include <linux/types.h>
#include <asm/ptrace.h>

#define CREATE_TRACE_POINTS
#include <trace/events/rseq.h>

#define RSEQ_CS_PREEMPT_MIGRATE_FLAGS (RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE | \
                                       RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT)

/*
 * Restartable sequences allow user-space to register a per-thread
 * struct rseq area with the kernel. The kernel keeps the cpu_id_start
 * and cpu_id fields of that area up to date, and user-space describes
 * critical sections with a struct rseq_cs pointed to by rseq->rseq_cs:
 *
 *   [start_ip]                       first instruction of the sequence
 *   ...                              loads, computation, final commit store
 *   [start_ip + post_commit_offset]  first instruction after the commit
 *                                    (end of the critical section)
 *   [abort_ip]                       abort handler, located outside the
 *                                    critical section and preceded in
 *                                    memory by the 32-bit signature
 *                                    registered with sys_rseq()
 *
 * Whenever the thread is preempted, migrated, or has a signal delivered
 * while its instruction pointer lies within
 * [start_ip, start_ip + post_commit_offset), the kernel redirects the
 * instruction pointer to abort_ip before returning to user-space, so the
 * critical section is restarted instead of being resumed with possibly
 * stale per-CPU state.
 */
static int rseq_update_cpu_id(struct task_struct *t)
{
        u32 cpu_id = raw_smp_processor_id();

        if (put_user(cpu_id, &t->rseq->cpu_id_start))
                return -EFAULT;
        if (put_user(cpu_id, &t->rseq->cpu_id))
                return -EFAULT;
        trace_rseq_update(t);
        return 0;
}

static int rseq_reset_rseq_cpu_id(struct task_struct *t)
{
        u32 cpu_id_start = 0, cpu_id = RSEQ_CPU_ID_UNINITIALIZED;

        /*
         * Reset cpu_id_start to its initial state (0).
         */
        if (put_user(cpu_id_start, &t->rseq->cpu_id_start))
                return -EFAULT;
        /*
         * Reset cpu_id to RSEQ_CPU_ID_UNINITIALIZED, so user-space can
         * tell after unregistration that rseq needs to be registered
         * again before being used.
         */
        if (put_user(cpu_id, &t->rseq->cpu_id))
                return -EFAULT;
        return 0;
}

static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
{
        struct rseq_cs __user *urseq_cs;
        u64 ptr;
        u32 __user *usig;
        u32 sig;
        int ret;

        if (copy_from_user(&ptr, &t->rseq->rseq_cs.ptr64, sizeof(ptr)))
                return -EFAULT;
        if (!ptr) {
                memset(rseq_cs, 0, sizeof(*rseq_cs));
                return 0;
        }
        if (ptr >= TASK_SIZE)
                return -EINVAL;
        urseq_cs = (struct rseq_cs __user *)(unsigned long)ptr;
        if (copy_from_user(rseq_cs, urseq_cs, sizeof(*rseq_cs)))
                return -EFAULT;

        if (rseq_cs->start_ip >= TASK_SIZE ||
            rseq_cs->start_ip + rseq_cs->post_commit_offset >= TASK_SIZE ||
            rseq_cs->abort_ip >= TASK_SIZE ||
            rseq_cs->version > 0)
                return -EINVAL;

        /* Reject critical sections which wrap around the address space. */
        if (rseq_cs->start_ip + rseq_cs->post_commit_offset < rseq_cs->start_ip)
                return -EINVAL;

        /* The abort handler must be located outside the critical section. */
        if (rseq_cs->abort_ip - rseq_cs->start_ip < rseq_cs->post_commit_offset)
                return -EINVAL;

        /*
         * The 32-bit word immediately preceding abort_ip must match the
         * signature registered with sys_rseq(), which limits where
         * abort_ip may point if rseq_cs is corrupted.
         */
        usig = (u32 __user *)(unsigned long)(rseq_cs->abort_ip - sizeof(u32));
        ret = get_user(sig, usig);
        if (ret)
                return ret;

        if (current->rseq_sig != sig) {
                printk_ratelimited(KERN_WARNING
                        "Possible attack attempt. Unexpected rseq signature 0x%x, expecting 0x%x (pid=%d, addr=%p).\n",
                        sig, current->rseq_sig, current->pid, usig);
                return -EINVAL;
        }
        return 0;
}

static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
{
        u32 flags, event_mask;
        int ret;

        /* Get the per-thread flags. */
        ret = get_user(flags, &t->rseq->flags);
        if (ret)
                return ret;

        /* Take the critical section flags into account as well. */
        flags |= cs_flags;

        /*
         * Restart on signal can only be inhibited when restart on
         * preempt and restart on migrate are inhibited too. Otherwise,
         * a preempted signal handler could fail to restart the prior
         * execution of another critical section.
         */
        if (unlikely((flags & RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL) &&
                     (flags & RSEQ_CS_PREEMPT_MIGRATE_FLAGS) !=
                     RSEQ_CS_PREEMPT_MIGRATE_FLAGS))
                return -EINVAL;

        /*
         * Load and clear the event mask atomically with respect to
         * scheduler preemption.
         */
        preempt_disable();
        event_mask = t->rseq_event_mask;
        t->rseq_event_mask = 0;
        preempt_enable();

        /* A restart is needed when an event not masked by the flags is pending. */
        return !!(event_mask & ~flags);
}

static int clear_rseq_cs(struct task_struct *t)
{
        /*
         * Clear the rseq_cs pointer once the kernel has determined that
         * the registered critical section is no longer active, so a
         * stale descriptor cannot trigger a spurious abort later on.
         *
         * Set rseq_cs to NULL.
         */
        if (clear_user(&t->rseq->rseq_cs.ptr64, sizeof(t->rseq->rseq_cs.ptr64)))
                return -EFAULT;
        return 0;
}

/*
 * The unsigned comparison is true only when
 * start_ip <= ip < start_ip + post_commit_offset: an ip below start_ip
 * wraps around and compares as a large value.
 */
static bool in_rseq_cs(unsigned long ip, struct rseq_cs *rseq_cs)
{
        return ip - rseq_cs->start_ip < rseq_cs->post_commit_offset;
}

static int rseq_ip_fixup(struct pt_regs *regs)
{
        unsigned long ip = instruction_pointer(regs);
        struct task_struct *t = current;
        struct rseq_cs rseq_cs;
        int ret;

        ret = rseq_get_rseq_cs(t, &rseq_cs);
        if (ret)
                return ret;

        /*
         * Handle potentially not being within a critical section.
         * If the instruction pointer is not nested over an rseq critical
         * section, a restart is useless: clear the rseq_cs pointer and
         * return.
         */
        if (!in_rseq_cs(ip, &rseq_cs))
                return clear_rseq_cs(t);
        ret = rseq_need_restart(t, rseq_cs.flags);
        if (ret <= 0)
                return ret;
        ret = clear_rseq_cs(t);
        if (ret)
                return ret;
        trace_rseq_ip_fixup(ip, rseq_cs.start_ip, rseq_cs.post_commit_offset,
                            rseq_cs.abort_ip);
        instruction_pointer_set(regs, (unsigned long)rseq_cs.abort_ip);
        return 0;
}

/*
 * This resume handler must always be executed between any of:
 * - preemption,
 * - signal delivery,
 * and return to user-space.
 *
 * This is how the rseq critical section commits only if it executed
 * atomically with respect to other threads scheduled on the same CPU
 * and with respect to signal handlers: otherwise, the instruction
 * pointer is moved to the abort handler before returning to user-space.
 */
void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
{
        struct task_struct *t = current;
        int ret, sig;

        if (unlikely(t->flags & PF_EXITING))
                return;
        if (unlikely(!access_ok(t->rseq, sizeof(*t->rseq))))
                goto error;
        ret = rseq_ip_fixup(regs);
        if (unlikely(ret < 0))
                goto error;
        if (unlikely(rseq_update_cpu_id(t)))
                goto error;
        return;

error:
        sig = ksig ? ksig->sig : 0;
        force_sigsegv(sig);
}

#ifdef CONFIG_DEBUG_RSEQ

/*
 * Terminate the process if a syscall is issued from within a
 * restartable sequence.
 */
void rseq_syscall(struct pt_regs *regs)
{
        unsigned long ip = instruction_pointer(regs);
        struct task_struct *t = current;
        struct rseq_cs rseq_cs;

        if (!t->rseq)
                return;
        if (!access_ok(t->rseq, sizeof(*t->rseq)) ||
            rseq_get_rseq_cs(t, &rseq_cs) || in_rseq_cs(ip, &rseq_cs))
                force_sig(SIGSEGV);
}

#endif

/*
 * sys_rseq - register or unregister the rseq area for the calling thread.
 */
SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
                int, flags, u32, sig)
{
        int ret;

        if (flags & RSEQ_FLAG_UNREGISTER) {
                /* Unregister rseq for the current thread. */
                if (current->rseq != rseq || !current->rseq)
                        return -EINVAL;
                if (rseq_len != sizeof(*rseq))
                        return -EINVAL;
                if (current->rseq_sig != sig)
                        return -EPERM;
                ret = rseq_reset_rseq_cpu_id(current);
                if (ret)
                        return ret;
                current->rseq = NULL;
                current->rseq_sig = 0;
                return 0;
        }

        if (unlikely(flags))
                return -EINVAL;

        if (current->rseq) {
                /*
                 * If rseq is already registered, check whether
                 * the provided address, length and signature differ
                 * from the prior registration.
                 */
                if (current->rseq != rseq || rseq_len != sizeof(*rseq))
                        return -EINVAL;
                if (current->rseq_sig != sig)
                        return -EPERM;
                /* Already registered. */
                return -EBUSY;
        }

        /*
         * If there was no rseq previously registered, ensure the
         * provided rseq area is properly aligned, properly sized and
         * accessible.
         */
        if (!IS_ALIGNED((unsigned long)rseq, __alignof__(*rseq)) ||
            rseq_len != sizeof(*rseq))
                return -EINVAL;
        if (!access_ok(rseq, rseq_len))
                return -EFAULT;
        current->rseq = rseq;
        current->rseq_sig = sig;
        /*
         * If rseq was previously inactive and has just been registered,
         * ensure the cpu_id_start and cpu_id fields are updated before
         * returning to user-space.
         */
        rseq_set_notify_resume(current);

        return 0;
}
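
For reference, a minimal user-space sketch of registering and unregistering an rseq area through this system call. It assumes a kernel and libc exposing __NR_rseq and the UAPI <linux/rseq.h> header; the signature value is an arbitrary example, and on libcs that already register rseq for each thread the registration below is simply rejected by the already-registered checks in sys_rseq above.

/* Illustrative user-space sketch, not part of the kernel source. */
#define _GNU_SOURCE
#include <linux/rseq.h>         /* struct rseq, RSEQ_FLAG_UNREGISTER */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

#define EXAMPLE_RSEQ_SIG 0x53053053u    /* arbitrary example signature */

/* The UAPI struct rseq already carries the alignment checked by sys_rseq(). */
static __thread struct rseq rseq_area = {
        .cpu_id = RSEQ_CPU_ID_UNINITIALIZED,
};

int main(void)
{
        /* Register: rseq_len must equal sizeof(struct rseq), flags must be 0. */
        if (syscall(__NR_rseq, &rseq_area, sizeof(rseq_area), 0,
                    EXAMPLE_RSEQ_SIG))
                perror("rseq register");
        else
                printf("running on cpu %u\n", rseq_area.cpu_id);

        /* Unregister: same address, length and signature, plus the flag. */
        if (syscall(__NR_rseq, &rseq_area, sizeof(rseq_area),
                    RSEQ_FLAG_UNREGISTER, EXAMPLE_RSEQ_SIG))
                perror("rseq unregister");
        return 0;
}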