This source file includes the following definitions:
- check_for_xstate
- save_fsave_header
- save_xstate_epilog
- copy_fpregs_to_sigframe
- copy_fpstate_to_sigframe
- sanitize_restored_xstate
- copy_user_to_fpregs_zeroing
- __fpu__restore_sig
- xstate_sigframe_size
- fpu__restore_sig
- fpu__alloc_mathframe
- fpu__init_prepare_fx_sw_frame
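
/*
 * FPU signal frame handling routines.
 */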
#include <linux/compat.h>
#include <linux/cpu.h>
#include <linux/pagemap.h>

#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/xstate.h>

#include <asm/sigframe.h>
#include <asm/trace/fpu.h>

static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;
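
/*
 * Check for the presence of extended state information in the user
 * fpstate pointer in the sigcontext.
 */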
static inline int check_for_xstate(struct fxregs_state __user *buf,
				   void __user *fpstate,
				   struct _fpx_sw_bytes *fx_sw)
{
	int min_xstate_size = sizeof(struct fxregs_state) +
			      sizeof(struct xstate_header);
	unsigned int magic2;

	if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw)))
		return -1;

	if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
	    fx_sw->xstate_size < min_xstate_size ||
	    fx_sw->xstate_size > fpu_user_xstate_size ||
	    fx_sw->xstate_size > fx_sw->extended_size)
		return -1;
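
	/*
	 * Check for the presence of the second magic word at the end of the
	 * memory layout. This detects the case where user space copied only
	 * the legacy fpstate layout without the extended state information.
	 */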
	if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size)) ||
	    magic2 != FP_XSTATE_MAGIC2)
		return -1;

	return 0;
}
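
/*
 * Signal frame handlers.
 */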
static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
{
	if (use_fxsr()) {
		struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
		struct user_i387_ia32_struct env;
		struct _fpstate_32 __user *fp = buf;

		fpregs_lock();
		if (!test_thread_flag(TIF_NEED_FPU_LOAD))
			copy_fxregs_to_kernel(&tsk->thread.fpu);
		fpregs_unlock();

		convert_from_fxsr(&env, tsk);

		if (__copy_to_user(buf, &env, sizeof(env)) ||
		    __put_user(xsave->i387.swd, &fp->status) ||
		    __put_user(X86_FXSR_MAGIC, &fp->magic))
			return -1;
	} else {
		struct fregs_state __user *fp = buf;
		u32 swd;

		if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status))
			return -1;
	}

	return 0;
}
static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
{
	struct xregs_state __user *x = buf;
	struct _fpx_sw_bytes *sw_bytes;
	u32 xfeatures;
	int err;
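
	/* Set up the bytes not touched by [f]xsave and reserved for SW. */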
	sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
	err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));

	if (!use_xsave())
		return err;

	err |= __put_user(FP_XSTATE_MAGIC2,
			  (__u32 __user *)(buf + fpu_user_xstate_size));
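
	/*
	 * Read back the xfeatures value that was written to the user buffer
	 * (either directly from the CPU or from the task's saved state).
	 */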
	err |= __get_user(xfeatures, (__u32 __user *)&x->header.xfeatures);
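
	/*
	 * For legacy compatibility, always set the FP/SSE bits in the feature
	 * bit vector written to the user frame. This way any changes a legacy
	 * application (which never touches the xfeatures field in the xsave
	 * header) makes to the FP/SSE state are picked up on sigreturn.
	 */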
	xfeatures |= XFEATURE_MASK_FPSSE;

	err |= __put_user(xfeatures, (__u32 __user *)&x->header.xfeatures);

	return err;
}
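
/*
 * Dump the current FPU registers straight to the user signal frame with the
 * fastest save instruction available. If the save faults part-way, the frame
 * is zeroed so user space never sees a partially written state.
 */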
static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
{
	int err;

	if (use_xsave())
		err = copy_xregs_to_user(buf);
	else if (use_fxsr())
		err = copy_fxregs_to_user((struct fxregs_state __user *)buf);
	else
		err = copy_fregs_to_user((struct fregs_state __user *)buf);

	if (unlikely(err) && __clear_user(buf, fpu_user_xstate_size))
		err = -EFAULT;

	return err;
}
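
/*
 * Save the FPU register state to the user signal frame.
 *
 * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save state is
 * copied. 'buf' points to 'buf_fx', or to an fsave header followed by
 * 'buf_fx':
 *
 *	buf == buf_fx for 64-bit frames and 32-bit fsave frames.
 *	buf != buf_fx for 32-bit frames with fxstate.
 *
 * Try to save the registers directly to the user frame with the page fault
 * handler disabled. If that fails, fault in the pages and retry.
 *
 * If this is a 32-bit frame with fxstate, put an fsave header before the
 * aligned state at 'buf_fx'.
 *
 * For [f]xsave state, update the SW reserved fields in the [f]xsave frame
 * to indicate the absence/presence of extended state to user space.
 */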
int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
{
	struct task_struct *tsk = current;
	int ia32_fxstate = (buf != buf_fx);
	int ret;

	ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
			 IS_ENABLED(CONFIG_IA32_EMULATION));

	if (!access_ok(buf, size))
		return -EACCES;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(current, NULL, 0,
				sizeof(struct user_i387_ia32_struct), NULL,
				(struct _fpstate_32 __user *)buf) ? -1 : 1;

retry:
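	/*
	 * Load the FPU registers if they are not valid for the current task.
	 * With valid registers we can attempt to save the state directly to
	 * the user frame with the page fault handler disabled. If this fails,
	 * fault in the user memory and retry.
	 */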
	fpregs_lock();
	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		__fpregs_load_activate();

	pagefault_disable();
	ret = copy_fpregs_to_sigframe(buf_fx);
	pagefault_enable();
	fpregs_unlock();

	if (ret) {
		if (!fault_in_pages_writeable(buf_fx, fpu_user_xstate_size))
			goto retry;
		return -EFAULT;
	}
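
	/* Save the fsave header for the 32-bit frames. */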
	if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
		return -1;

	if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
		return -1;

	return 0;
}
static inline void
sanitize_restored_xstate(union fpregs_state *state,
			 struct user_i387_ia32_struct *ia32_env,
			 u64 xfeatures, int fx_only)
{
	struct xregs_state *xsave = &state->xsave;
	struct xstate_header *header = &xsave->header;

	if (use_xsave()) {
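		/*
		 * Note: the reserved bits in the xstate header need no
		 * zeroing here; they were either never copied in, or were
		 * checked to be zero earlier.
		 *
		 * Init the state that is not present in the memory layout
		 * and not enabled by the OS.
		 */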
		if (fx_only)
			header->xfeatures = XFEATURE_MASK_FPSSE;
		else
			header->xfeatures &= xfeatures;
	}

	if (use_fxsr()) {
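		/*
		 * MXCSR reserved bits must be masked to zero for security
		 * reasons.
		 */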
		xsave->i387.mxcsr &= mxcsr_feature_mask;

		if (ia32_env)
			convert_to_fxsr(&state->fxsave, ia32_env);
	}
}
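
/*
 * Restore the extended state if present. Otherwise, restore the FP/SSE state.
 */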
static int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
{
	if (use_xsave()) {
		if (fx_only) {
			u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;

			copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
			return copy_user_to_fxregs(buf);
		} else {
			u64 init_bv = xfeatures_mask & ~xbv;

			if (unlikely(init_bv))
				copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
			return copy_user_to_xregs(buf, xbv);
		}
	} else if (use_fxsr()) {
		return copy_user_to_fxregs(buf);
	} else {
		return copy_user_to_fregs(buf);
	}
}
static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
{
	struct user_i387_ia32_struct *envp = NULL;
	int state_size = fpu_kernel_xstate_size;
	int ia32_fxstate = (buf != buf_fx);
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;
	struct user_i387_ia32_struct env;
	u64 xfeatures = 0;
	int fx_only = 0;
	int ret = 0;

	ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
			 IS_ENABLED(CONFIG_IA32_EMULATION));

	if (!buf) {
		fpu__clear(fpu);
		return 0;
	}

	if (!access_ok(buf, size))
		return -EACCES;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(current, NULL,
				       0, sizeof(struct user_i387_ia32_struct),
				       NULL, buf) != 0;

	if (use_xsave()) {
		struct _fpx_sw_bytes fx_sw_user;

		if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
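			/*
			 * Couldn't find the extended state information in
			 * the memory layout. Restore just the FP/SSE state
			 * and init all the other extended state.
			 */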
			state_size = sizeof(struct fxregs_state);
			fx_only = 1;
			trace_x86_fpu_xstate_check_failed(fpu);
		} else {
			state_size = fx_sw_user.xstate_size;
			xfeatures = fx_sw_user.xfeatures;
		}
	}
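
	/*
	 * The current state of the FPU registers does not matter. By setting
	 * TIF_NEED_FPU_LOAD unconditionally it is ensured that our xstate is
	 * not modified on context switch and that it is considered loaded
	 * again on return to userland (invalidating last_cpu defeats the
	 * register-reuse optimisation).
	 */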
	set_thread_flag(TIF_NEED_FPU_LOAD);
	__fpu_invalidate_fpregs_state(fpu);

	if ((unsigned long)buf_fx % 64)
		fx_only = 1;
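
	/*
	 * For 32-bit frames with fxstate, copy the fsave header (the i387
	 * environment) from the user frame so it can be merged back into
	 * the fxstate while sanitizing, below.
	 */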
	if (ia32_fxstate) {
		ret = __copy_from_user(&env, buf, sizeof(env));
		if (ret)
			goto err_out;
		envp = &env;
	} else {
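		/*
		 * Attempt to restore the FPU registers directly from user
		 * memory. For that to succeed, the user access cannot cause
		 * page faults. If it does, fall back to the slow path below,
		 * going through the kernel buffer with the page fault
		 * handler enabled.
		 */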
		fpregs_lock();
		pagefault_disable();
		ret = copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only);
		pagefault_enable();
		if (!ret) {
			fpregs_mark_activate();
			fpregs_unlock();
			return 0;
		}
		fpregs_deactivate(fpu);
		fpregs_unlock();
	}
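
	/*
	 * Slow path: either the direct restore faulted or this is a 32-bit
	 * frame with fxstate. Copy the state into the kernel buffer first,
	 * sanitize it, and only then load it into the registers.
	 */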
	if (use_xsave() && !fx_only) {
		u64 init_bv = xfeatures_mask & ~xfeatures;

		if (using_compacted_format()) {
			ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
		} else {
			ret = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);

			if (!ret && state_size > offsetof(struct xregs_state, header))
				ret = validate_xstate_header(&fpu->state.xsave.header);
		}
		if (ret)
			goto err_out;

		sanitize_restored_xstate(&fpu->state, envp, xfeatures, fx_only);

		fpregs_lock();
		if (unlikely(init_bv))
			copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
		ret = copy_kernel_to_xregs_err(&fpu->state.xsave, xfeatures);

	} else if (use_fxsr()) {
		ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
		if (ret) {
			ret = -EFAULT;
			goto err_out;
		}

		sanitize_restored_xstate(&fpu->state, envp, xfeatures, fx_only);

		fpregs_lock();
		if (use_xsave()) {
			u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;

			copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
		}

		ret = copy_kernel_to_fxregs_err(&fpu->state.fxsave);
	} else {
		ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
		if (ret)
			goto err_out;

		fpregs_lock();
		ret = copy_kernel_to_fregs_err(&fpu->state.fsave);
	}
	if (!ret)
		fpregs_mark_activate();
	else
		fpregs_deactivate(fpu);
	fpregs_unlock();

err_out:
	if (ret)
		fpu__clear(fpu);
	return ret;
}
static inline int xstate_sigframe_size(void)
{
	return use_xsave() ? fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE :
			fpu_user_xstate_size;
}
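
/*
 * Restore the FPU state from a user-space signal frame.
 */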
int fpu__restore_sig(void __user *buf, int ia32_frame)
{
	void __user *buf_fx = buf;
	int size = xstate_sigframe_size();

	if (ia32_frame && use_fxsr()) {
		buf_fx = buf + sizeof(struct fregs_state);
		size += sizeof(struct fregs_state);
	}

	return __fpu__restore_sig(buf, buf_fx, size);
}
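
/*
 * Carve out space for the [f|fx|x]save frame below 'sp'. Returns the new,
 * lowered stack pointer; '*buf_fx' points at the 64-byte aligned register
 * save area and '*size' reports the total frame size.
 */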
unsigned long
fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
		     unsigned long *buf_fx, unsigned long *size)
{
	unsigned long frame_size = xstate_sigframe_size();

	*buf_fx = sp = round_down(sp - frame_size, 64);
	if (ia32_frame && use_fxsr()) {
		frame_size += sizeof(struct fregs_state);
		sp -= sizeof(struct fregs_state);
	}

	*size = frame_size;

	return sp;
}
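
/*
 * Prepare the SW reserved portion of the fxsave memory layout, indicating
 * the presence of the extended state information in the memory layout
 * pointed to by the fpstate pointer in the sigcontext.
 * This is saved whenever the FP and extended state context is saved on
 * the user stack during signal delivery.
 */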
void fpu__init_prepare_fx_sw_frame(void)
{
	int size = fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE;

	fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
	fx_sw_reserved.extended_size = size;
	fx_sw_reserved.xfeatures = xfeatures_mask;
	fx_sw_reserved.xstate_size = fpu_user_xstate_size;

	if (IS_ENABLED(CONFIG_IA32_EMULATION) ||
	    IS_ENABLED(CONFIG_X86_32)) {
		int fsave_header_size = sizeof(struct fregs_state);

		fx_sw_reserved_ia32 = fx_sw_reserved;
		fx_sw_reserved_ia32.extended_size = size + fsave_header_size;
	}
}