This source file includes the following definitions:
- set_fs
- __chk_range_not_ok
- user_access_begin
1
2 #ifndef _ASM_X86_UACCESS_H
3 #define _ASM_X86_UACCESS_H
4
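/*
 * User-space memory access helpers: address-limit handling, access_ok(),
 * the get_user()/put_user() family and the unsafe_*() accessors.
 */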
7 #include <linux/compiler.h>
8 #include <linux/kasan-checks.h>
9 #include <linux/string.h>
10 #include <asm/asm.h>
11 #include <asm/page.h>
12 #include <asm/smap.h>
13 #include <asm/extable.h>
14
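/*
 * The address limit stored in current->thread.addr_limit determines how far
 * user accesses may reach: USER_DS limits them to TASK_SIZE_MAX, while
 * KERNEL_DS (-1UL) effectively disables the check.  set_fs() also raises
 * TIF_FSCHECK so the limit is verified again on return to user mode.
 */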
23 #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
24
25 #define KERNEL_DS MAKE_MM_SEG(-1UL)
26 #define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX)
27
28 #define get_fs() (current->thread.addr_limit)
29 static inline void set_fs(mm_segment_t fs)
30 {
31 current->thread.addr_limit = fs;
32
33 set_thread_flag(TIF_FSCHECK);
34 }
35
36 #define segment_eq(a, b) ((a).seg == (b).seg)
37 #define user_addr_max() (current->thread.addr_limit.seg)
38
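/*
 * Test whether a block of memory is a valid user-space address range.
 * Returns true if addr..addr+size is NOT ok (it wraps or exceeds the limit),
 * false if the range is valid.
 */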
43 static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
44 {
45
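/*
 * If the size is a compile-time constant (typically a sizeof()), compare
 * against limit - size: subtracting from the limit cannot overflow, whereas
 * adding the size to addr could.
 */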
52 if (__builtin_constant_p(size))
53 return unlikely(addr > limit - size);
54
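/* Arbitrary size: watch for address wrap-around as well as the limit. */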
56 addr += size;
57 if (unlikely(addr < size))
58 return true;
59 return unlikely(addr > limit);
60 }
61
62 #define __range_not_ok(addr, size, limit) \
63 ({ \
64 __chk_user_ptr(addr); \
65 __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
66 })
67
68 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
69 static inline bool pagefault_disabled(void);
70 # define WARN_ON_IN_IRQ() \
71 WARN_ON_ONCE(!in_task() && !pagefault_disabled())
72 #else
73 # define WARN_ON_IN_IRQ()
74 #endif
75
76
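/*
 * access_ok - check that a user-space pointer range is valid.
 * @addr: start of the block, in user space.
 * @size: size of the block, in bytes.
 *
 * Returns non-zero when the whole range lies below the current address
 * limit, zero otherwise.  It only validates the range; the access itself may
 * still fault.  Intended for task context, which WARN_ON_IN_IRQ() enforces.
 *
 * Illustrative use (hypothetical variables, not part of this header):
 *
 *	if (!access_ok(ubuf, count))
 *		return -EFAULT;
 */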
93 #define access_ok(addr, size) \
94 ({ \
95 WARN_ON_IN_IRQ(); \
96 likely(!__range_not_ok(addr, size, user_addr_max())); \
97 })
98
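/*
 * Main single-value transfer routines.  They pick the access size from the
 * pointer type and call one of the out-of-line size-suffixed helpers below.
 * The "__"-prefixed variants skip the access_ok() check, which the caller
 * must already have performed.
 */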
114 extern int __get_user_1(void);
115 extern int __get_user_2(void);
116 extern int __get_user_4(void);
117 extern int __get_user_8(void);
118 extern int __get_user_bad(void);
119
120 #define __uaccess_begin() stac()
121 #define __uaccess_end() clac()
122 #define __uaccess_begin_nospec() \
123 ({ \
124 stac(); \
125 barrier_nospec(); \
126 })
127
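/*
 * __inttype(x) yields unsigned long when the argument fits in one word and
 * unsigned long long otherwise, so a single register-sized (or register-pair)
 * temporary can hold any transferred value.
 */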
132 #define __inttype(x) \
133 __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
134
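/*
 * get_user - read a simple variable from user space.
 * @x:   variable to store the result.
 * @ptr: source address, in user space.
 *
 * Returns 0 on success or -EFAULT on error, in which case @x is zeroed.
 * May sleep (might_fault()), so it must only be used from task context.
 *
 * Illustrative use (hypothetical variables):
 *
 *	u32 flags;
 *	if (get_user(flags, &uarg->flags))
 *		return -EFAULT;
 */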
166 #define get_user(x, ptr) \
167 ({ \
168 int __ret_gu; \
169 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
170 __chk_user_ptr(ptr); \
171 might_fault(); \
172 asm volatile("call __get_user_%P4" \
173 : "=a" (__ret_gu), "=r" (__val_gu), \
174 ASM_CALL_CONSTRAINT \
175 : "0" (ptr), "i" (sizeof(*(ptr)))); \
176 (x) = (__force __typeof__(*(ptr))) __val_gu; \
177 __builtin_expect(__ret_gu, 0); \
178 })
179
180 #define __put_user_x(size, x, ptr, __ret_pu) \
181 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
182 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
183
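/*
 * On 32-bit kernels an 8-byte put_user() needs two 32-bit stores, with the
 * value passed in the %eax:%edx pair (the "A" constraint below).
 */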
186 #ifdef CONFIG_X86_32
187 #define __put_user_goto_u64(x, addr, label) \
188 asm_volatile_goto("\n" \
189 "1: movl %%eax,0(%1)\n" \
190 "2: movl %%edx,4(%1)\n" \
191 _ASM_EXTABLE_UA(1b, %l2) \
192 _ASM_EXTABLE_UA(2b, %l2) \
193 : : "A" (x), "r" (addr) \
194 : : label)
195
196 #define __put_user_asm_ex_u64(x, addr) \
197 asm volatile("\n" \
198 "1: movl %%eax,0(%1)\n" \
199 "2: movl %%edx,4(%1)\n" \
200 "3:" \
201 _ASM_EXTABLE_EX(1b, 2b) \
202 _ASM_EXTABLE_EX(2b, 3b) \
203 : : "A" (x), "r" (addr))
204
205 #define __put_user_x8(x, ptr, __ret_pu) \
206 asm volatile("call __put_user_8" : "=a" (__ret_pu) \
207 : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
208 #else
209 #define __put_user_goto_u64(x, ptr, label) \
210 __put_user_goto(x, ptr, "q", "", "er", label)
211 #define __put_user_asm_ex_u64(x, addr) \
212 __put_user_asm_ex(x, addr, "q", "", "er")
213 #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
214 #endif
215
216 extern void __put_user_bad(void);
217
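/*
 * The out-of-line __put_user_N helpers use a non-standard calling convention:
 * the pointer arrives in %ecx, the value in %eax (plus %edx for 8-byte values
 * on 32-bit), and the error code is returned in %eax.  %ebx is clobbered.
 */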
222 extern void __put_user_1(void);
223 extern void __put_user_2(void);
224 extern void __put_user_4(void);
225 extern void __put_user_8(void);
226
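/*
 * put_user - write a simple value to user space.
 * @x:   value to copy.
 * @ptr: destination address, in user space.
 *
 * Returns 0 on success or -EFAULT on error.  May sleep, so task context only.
 *
 * Illustrative use (hypothetical variables):
 *
 *	if (put_user(status, &uarg->status))
 *		return -EFAULT;
 */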
244 #define put_user(x, ptr) \
245 ({ \
246 int __ret_pu; \
247 __typeof__(*(ptr)) __pu_val; \
248 __chk_user_ptr(ptr); \
249 might_fault(); \
250 __pu_val = x; \
251 switch (sizeof(*(ptr))) { \
252 case 1: \
253 __put_user_x(1, __pu_val, ptr, __ret_pu); \
254 break; \
255 case 2: \
256 __put_user_x(2, __pu_val, ptr, __ret_pu); \
257 break; \
258 case 4: \
259 __put_user_x(4, __pu_val, ptr, __ret_pu); \
260 break; \
261 case 8: \
262 __put_user_x8(__pu_val, ptr, __ret_pu); \
263 break; \
264 default: \
265 __put_user_x(X, __pu_val, ptr, __ret_pu); \
266 break; \
267 } \
268 __builtin_expect(__ret_pu, 0); \
269 })
270
271 #define __put_user_size(x, ptr, size, label) \
272 do { \
273 __chk_user_ptr(ptr); \
274 switch (size) { \
275 case 1: \
276 __put_user_goto(x, ptr, "b", "b", "iq", label); \
277 break; \
278 case 2: \
279 __put_user_goto(x, ptr, "w", "w", "ir", label); \
280 break; \
281 case 4: \
282 __put_user_goto(x, ptr, "l", "k", "ir", label); \
283 break; \
284 case 8: \
285 __put_user_goto_u64(x, ptr, label); \
286 break; \
287 default: \
288 __put_user_bad(); \
289 } \
290 } while (0)
291
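/*
 * The _ex variants do not call __uaccess_begin()/__uaccess_end(); the
 * surrounding uaccess_try/uaccess_catch block is expected to do that and to
 * collect any fault via current->thread.uaccess_err.
 */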
296 #define __put_user_size_ex(x, ptr, size) \
297 do { \
298 __chk_user_ptr(ptr); \
299 switch (size) { \
300 case 1: \
301 __put_user_asm_ex(x, ptr, "b", "b", "iq"); \
302 break; \
303 case 2: \
304 __put_user_asm_ex(x, ptr, "w", "w", "ir"); \
305 break; \
306 case 4: \
307 __put_user_asm_ex(x, ptr, "l", "k", "ir"); \
308 break; \
309 case 8: \
310 __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
311 break; \
312 default: \
313 __put_user_bad(); \
314 } \
315 } while (0)
316
317 #ifdef CONFIG_X86_32
318 #define __get_user_asm_u64(x, ptr, retval, errret) \
319 ({ \
320 __typeof__(ptr) __ptr = (ptr); \
321 asm volatile("\n" \
322 "1: movl %2,%%eax\n" \
323 "2: movl %3,%%edx\n" \
324 "3:\n" \
325 ".section .fixup,\"ax\"\n" \
326 "4: mov %4,%0\n" \
327 " xorl %%eax,%%eax\n" \
328 " xorl %%edx,%%edx\n" \
329 " jmp 3b\n" \
330 ".previous\n" \
331 _ASM_EXTABLE_UA(1b, 4b) \
332 _ASM_EXTABLE_UA(2b, 4b) \
333 : "=r" (retval), "=&A"(x) \
334 : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \
335 "i" (errret), "0" (retval)); \
336 })
337
338 #define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
339 #else
340 #define __get_user_asm_u64(x, ptr, retval, errret) \
341 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
342 #define __get_user_asm_ex_u64(x, ptr) \
343 __get_user_asm_ex(x, ptr, "q", "", "=r")
344 #endif
345
346 #define __get_user_size(x, ptr, size, retval, errret) \
347 do { \
348 retval = 0; \
349 __chk_user_ptr(ptr); \
350 switch (size) { \
351 case 1: \
352 __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
353 break; \
354 case 2: \
355 __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
356 break; \
357 case 4: \
358 __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
359 break; \
360 case 8: \
361 __get_user_asm_u64(x, ptr, retval, errret); \
362 break; \
363 default: \
364 (x) = __get_user_bad(); \
365 } \
366 } while (0)
367
368 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
369 asm volatile("\n" \
370 "1: mov"itype" %2,%"rtype"1\n" \
371 "2:\n" \
372 ".section .fixup,\"ax\"\n" \
373 "3: mov %3,%0\n" \
374 " xor"itype" %"rtype"1,%"rtype"1\n" \
375 " jmp 2b\n" \
376 ".previous\n" \
377 _ASM_EXTABLE_UA(1b, 3b) \
378 : "=r" (err), ltype(x) \
379 : "m" (__m(addr)), "i" (errret), "0" (err))
380
381 #define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret) \
382 asm volatile("\n" \
383 "1: mov"itype" %2,%"rtype"1\n" \
384 "2:\n" \
385 ".section .fixup,\"ax\"\n" \
386 "3: mov %3,%0\n" \
387 " jmp 2b\n" \
388 ".previous\n" \
389 _ASM_EXTABLE_UA(1b, 3b) \
390 : "=r" (err), ltype(x) \
391 : "m" (__m(addr)), "i" (errret), "0" (err))
392
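/*
 * As with __put_user_size_ex(), the _ex get variants rely on the enclosing
 * uaccess_try/uaccess_catch block for __uaccess_begin()/__uaccess_end() and
 * for error reporting.
 */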
397 #define __get_user_size_ex(x, ptr, size) \
398 do { \
399 __chk_user_ptr(ptr); \
400 switch (size) { \
401 case 1: \
402 __get_user_asm_ex(x, ptr, "b", "b", "=q"); \
403 break; \
404 case 2: \
405 __get_user_asm_ex(x, ptr, "w", "w", "=r"); \
406 break; \
407 case 4: \
408 __get_user_asm_ex(x, ptr, "l", "k", "=r"); \
409 break; \
410 case 8: \
411 __get_user_asm_ex_u64(x, ptr); \
412 break; \
413 default: \
414 (x) = __get_user_bad(); \
415 } \
416 } while (0)
417
418 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
419 asm volatile("1: mov"itype" %1,%"rtype"0\n" \
420 "2:\n" \
421 ".section .fixup,\"ax\"\n" \
422 "3:xor"itype" %"rtype"0,%"rtype"0\n" \
423 " jmp 2b\n" \
424 ".previous\n" \
425 _ASM_EXTABLE_EX(1b, 3b) \
426 : ltype(x) : "m" (__m(addr)))
427
428 #define __put_user_nocheck(x, ptr, size) \
429 ({ \
430 __label__ __pu_label; \
431 int __pu_err = -EFAULT; \
432 __typeof__(*(ptr)) __pu_val = (x); \
433 __typeof__(ptr) __pu_ptr = (ptr); \
434 __typeof__(size) __pu_size = (size); \
435 __uaccess_begin(); \
436 __put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label); \
437 __pu_err = 0; \
438 __pu_label: \
439 __uaccess_end(); \
440 __builtin_expect(__pu_err, 0); \
441 })
442
443 #define __get_user_nocheck(x, ptr, size) \
444 ({ \
445 int __gu_err; \
446 __inttype(*(ptr)) __gu_val; \
447 __typeof__(ptr) __gu_ptr = (ptr); \
448 __typeof__(size) __gu_size = (size); \
449 __uaccess_begin_nospec(); \
450 __get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \
451 __uaccess_end(); \
452 (x) = (__force __typeof__(*(ptr)))__gu_val; \
453 __builtin_expect(__gu_err, 0); \
454 })
455
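/*
 * __m() casts the target address through a dummy oversized structure so that
 * the "m" asm constraints below describe a genuine memory operand for the
 * user address without gcc making assumptions about its size.
 */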
457 struct __large_struct { unsigned long buf[100]; };
458 #define __m(x) (*(struct __large_struct __user *)(x))
459
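/*
 * __put_user_goto() stores the value with a single mov and, via the extable
 * entry, jumps straight to the caller-supplied error label if the access
 * faults (asm goto, so no error-code register is needed).
 */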
465 #define __put_user_goto(x, addr, itype, rtype, ltype, label) \
466 asm_volatile_goto("\n" \
467 "1: mov"itype" %"rtype"0,%1\n" \
468 _ASM_EXTABLE_UA(1b, %l2) \
469 : : ltype(x), "m" (__m(addr)) \
470 : : label)
471
472 #define __put_user_failed(x, addr, itype, rtype, ltype, errret) \
473 ({ __label__ __puflab; \
474 int __pufret = errret; \
475 __put_user_goto(x,addr,itype,rtype,ltype,__puflab); \
476 __pufret = 0; \
477 __puflab: __pufret; })
478
479 #define __put_user_asm(x, addr, retval, itype, rtype, ltype, errret) do { \
480 retval = __put_user_failed(x, addr, itype, rtype, ltype, errret); \
481 } while (0)
482
483 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
484 asm volatile("1: mov"itype" %"rtype"0,%1\n" \
485 "2:\n" \
486 _ASM_EXTABLE_EX(1b, 2b) \
487 : : ltype(x), "m" (__m(addr)))
488
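/*
 * uaccess_try / uaccess_catch: exception-table based error handling for a
 * block of *_ex accesses.  Faults set current->thread.uaccess_err, which
 * uaccess_catch() converts into -EFAULT.
 */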
492 #define uaccess_try do { \
493 current->thread.uaccess_err = 0; \
494 __uaccess_begin(); \
495 barrier();
496
497 #define uaccess_try_nospec do { \
498 current->thread.uaccess_err = 0; \
499 __uaccess_begin_nospec(); \
500
501 #define uaccess_catch(err) \
502 __uaccess_end(); \
503 (err) |= (current->thread.uaccess_err ? -EFAULT : 0); \
504 } while (0)
505
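/*
 * __get_user - get_user() without the access_ok() check.  The caller must
 * have validated the pointer beforehand; otherwise the semantics match
 * get_user(): 0 on success, -EFAULT on fault (with @x zeroed).
 */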
528 #define __get_user(x, ptr) \
529 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
530
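/*
 * __put_user - put_user() without the access_ok() check.  Only for pointers
 * the caller has already validated; returns 0 on success or -EFAULT on fault.
 */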
552 #define __put_user(x, ptr) \
553 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
554
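/*
 * {get|put}_user_try and catch: batch several *_ex accesses under a single
 * STAC/CLAC pair and collect one error code.
 *
 * Illustrative pattern (hypothetical fields):
 *
 *	get_user_try {
 *		get_user_ex(a, &uarg->a);
 *		get_user_ex(b, &uarg->b);
 *	} get_user_catch(err);
 */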
562 #define get_user_try uaccess_try_nospec
563 #define get_user_catch(err) uaccess_catch(err)
564
565 #define get_user_ex(x, ptr) do { \
566 unsigned long __gue_val; \
567 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
568 (x) = (__force __typeof__(*(ptr)))__gue_val; \
569 } while (0)
570
571 #define put_user_try uaccess_try
572 #define put_user_catch(err) uaccess_catch(err)
573
574 #define put_user_ex(x, ptr) \
575 __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
576
577 extern unsigned long
578 copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
579 extern __must_check long
580 strncpy_from_user(char *dst, const char __user *src, long count);
581
582 extern __must_check long strnlen_user(const char __user *str, long n);
583
584 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
585 unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
586
587 extern void __cmpxchg_wrong_size(void)
588 __compiletime_error("Bad argument size for cmpxchg");
589
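/*
 * __user_atomic_cmpxchg_inatomic() performs a locked cmpxchg directly on a
 * user-space address.  It returns 0 and stores the old value in *uval on
 * success (whether or not the exchange happened), or -EFAULT if the access
 * faulted.
 */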
590 #define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \
591 ({ \
592 int __ret = 0; \
593 __typeof__(*(ptr)) __old = (old); \
594 __typeof__(*(ptr)) __new = (new); \
595 __uaccess_begin_nospec(); \
596 switch (size) { \
597 case 1: \
598 { \
599 asm volatile("\n" \
600 "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
601 "2:\n" \
602 "\t.section .fixup, \"ax\"\n" \
603 "3:\tmov %3, %0\n" \
604 "\tjmp 2b\n" \
605 "\t.previous\n" \
606 _ASM_EXTABLE_UA(1b, 3b) \
607 : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
608 : "i" (-EFAULT), "q" (__new), "1" (__old) \
609 : "memory" \
610 ); \
611 break; \
612 } \
613 case 2: \
614 { \
615 asm volatile("\n" \
616 "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
617 "2:\n" \
618 "\t.section .fixup, \"ax\"\n" \
619 "3:\tmov %3, %0\n" \
620 "\tjmp 2b\n" \
621 "\t.previous\n" \
622 _ASM_EXTABLE_UA(1b, 3b) \
623 : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
624 : "i" (-EFAULT), "r" (__new), "1" (__old) \
625 : "memory" \
626 ); \
627 break; \
628 } \
629 case 4: \
630 { \
631 asm volatile("\n" \
632 "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
633 "2:\n" \
634 "\t.section .fixup, \"ax\"\n" \
635 "3:\tmov %3, %0\n" \
636 "\tjmp 2b\n" \
637 "\t.previous\n" \
638 _ASM_EXTABLE_UA(1b, 3b) \
639 : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
640 : "i" (-EFAULT), "r" (__new), "1" (__old) \
641 : "memory" \
642 ); \
643 break; \
644 } \
645 case 8: \
646 { \
647 if (!IS_ENABLED(CONFIG_X86_64)) \
648 __cmpxchg_wrong_size(); \
649 \
650 asm volatile("\n" \
651 "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
652 "2:\n" \
653 "\t.section .fixup, \"ax\"\n" \
654 "3:\tmov %3, %0\n" \
655 "\tjmp 2b\n" \
656 "\t.previous\n" \
657 _ASM_EXTABLE_UA(1b, 3b) \
658 : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
659 : "i" (-EFAULT), "r" (__new), "1" (__old) \
660 : "memory" \
661 ); \
662 break; \
663 } \
664 default: \
665 __cmpxchg_wrong_size(); \
666 } \
667 __uaccess_end(); \
668 *(uval) = __old; \
669 __ret; \
670 })
671
672 #define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \
673 ({ \
674 access_ok((ptr), sizeof(*(ptr))) ? \
675 __user_atomic_cmpxchg_inatomic((uval), (ptr), \
676 (old), (new), sizeof(*(ptr))) : \
677 -EFAULT; \
678 })
679
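/*
 * movsl-based copies can be slow when source and destination are not both
 * suitably aligned; movsl_mask holds the alignment mask the 32-bit copy
 * routines consult.
 */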
683 #ifdef CONFIG_X86_INTEL_USERCOPY
684 extern struct movsl_mask {
685 int mask;
686 } ____cacheline_aligned_in_smp movsl_mask;
687 #endif
688
689 #define ARCH_HAS_NOCACHE_UACCESS 1
690
691 #ifdef CONFIG_X86_32
692 # include <asm/uaccess_32.h>
693 #else
694 # include <asm/uaccess_64.h>
695 #endif
696
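/*
 * __copy_from_user_nmi maps onto the inatomic variant: NMI handlers cannot
 * take the usual might_fault() paths, so they use the non-sleeping copy and
 * rely on the exception fixups alone.
 */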
704 #define __copy_from_user_nmi __copy_from_user_inatomic
705
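/*
 * user_access_begin()/user_access_end() bracket a region of "unsafe"
 * accesses: user_access_begin() performs the access_ok() check and opens an
 * SMAP window (with a speculation barrier), and the unsafe_*() helpers inside
 * jump to a caller-supplied label on fault.
 *
 * Illustrative pattern (hypothetical variables):
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_put_user(val, &uptr->field, efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */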
712 static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
713 {
714 if (unlikely(!access_ok(ptr,len)))
715 return 0;
716 __uaccess_begin_nospec();
717 return 1;
718 }
719 #define user_access_begin(a,b) user_access_begin(a,b)
720 #define user_access_end() __uaccess_end()
721
722 #define user_access_save() smap_save()
723 #define user_access_restore(x) smap_restore(x)
724
725 #define unsafe_put_user(x, ptr, label) \
726 __put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)
727
728 #define unsafe_get_user(x, ptr, err_label) \
729 do { \
730 int __gu_err; \
731 __inttype(*(ptr)) __gu_val; \
732 __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \
733 (x) = (__force __typeof__(*(ptr)))__gu_val; \
734 if (unlikely(__gu_err)) goto err_label; \
735 } while (0)
736
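/*
 * unsafe_copy_loop()/unsafe_copy_to_user() copy in the largest chunks
 * possible (u64, then u32, u16, u8), jumping to the caller's label on the
 * first faulting store.  Must run inside a user_access_begin() section.
 */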
741 #define unsafe_copy_loop(dst, src, len, type, label) \
742 while (len >= sizeof(type)) { \
743 unsafe_put_user(*(type *)src,(type __user *)dst,label); \
744 dst += sizeof(type); \
745 src += sizeof(type); \
746 len -= sizeof(type); \
747 }
748
749 #define unsafe_copy_to_user(_dst,_src,_len,label) \
750 do { \
751 char __user *__ucu_dst = (_dst); \
752 const char *__ucu_src = (_src); \
753 size_t __ucu_len = (_len); \
754 unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label); \
755 unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label); \
756 unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label); \
757 unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \
758 } while (0)
759
760 #endif
761