This source file includes the following definitions:
- __copy_from_user_inatomic
- __copy_from_user
- __copy_to_user_inatomic
- __copy_to_user
- _copy_from_user
- _copy_to_user
- copy_from_user
- copy_to_user
- copy_in_user
- pagefault_disabled_inc
- pagefault_disabled_dec
- pagefault_disable
- pagefault_enable
- pagefault_disabled
- __copy_from_user_inatomic_nocache
- copy_struct_from_user
- user_access_save
- user_access_restore

#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kasan-checks.h>

#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)

#include <asm/uaccess.h>

/*
 * Architectures provide raw_copy_{to,from}_user(), which copy up to @n
 * bytes and return the number of bytes NOT copied (0 on full success).
 * The wrappers below layer might_fault(), KASAN and object-size checks
 * on top of those primitives; the double-underscore variants assume the
 * caller has already validated the range with access_ok().
 */

static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
        kasan_check_write(to, n);
        check_object_size(to, n, false);
        return raw_copy_from_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
        might_fault();
        kasan_check_write(to, n);
        check_object_size(to, n, false);
        return raw_copy_from_user(to, from, n);
}
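
/*
 * Illustrative sketch (not part of this header): the double-underscore
 * variants skip access_ok(), so the caller must validate the range
 * first. Here "dst", "usrc" and "len" are hypothetical locals:
 *
 *      if (!access_ok(usrc, len))
 *              return -EFAULT;
 *      if (__copy_from_user(dst, usrc, len))
 *              return -EFAULT;
 */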

/*
 * Mirror-image helpers that copy kernel data out to userspace; the
 * _inatomic form omits might_fault(), so it may be called with
 * pagefaults disabled.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
        kasan_check_read(from, n);
        check_object_size(from, n, true);
        return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        kasan_check_read(from, n);
        check_object_size(from, n, true);
        return raw_copy_to_user(to, from, n);
}

#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long res = n;
        might_fault();
        if (likely(access_ok(from, n))) {
                kasan_check_write(to, n);
                res = raw_copy_from_user(to, from, n);
        }
        if (unlikely(res))
                memset(to + (n - res), 0, res);
        return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        if (access_ok(to, n)) {
                kasan_check_read(from, n);
                n = raw_copy_to_user(to, from, n);
        }
        return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (likely(check_copy_size(to, n, false)))
                n = _copy_from_user(to, from, n);
        return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (likely(check_copy_size(from, n, true)))
                n = _copy_to_user(to, from, n);
        return n;
}
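
/*
 * Illustrative sketch (hypothetical ioctl handler, not part of this
 * header): the checked copy_{from,to}_user() pair is the normal way to
 * move a struct across the user/kernel boundary; both return the number
 * of bytes left uncopied, so non-zero means failure:
 *
 *      struct foo karg;        // hypothetical kernel-side struct
 *      if (copy_from_user(&karg, uarg, sizeof(karg)))
 *              return -EFAULT;
 *      karg.result = do_something(&karg);      // hypothetical helper
 *      if (copy_to_user(uarg, &karg, sizeof(karg)))
 *              return -EFAULT;
 */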

#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        might_fault();
        if (access_ok(to, n) && access_ok(from, n))
                n = raw_copy_in_user(to, from, n);
        return n;
}
#endif

static __always_inline void pagefault_disabled_inc(void)
{
        current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
        current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it
 * will not take any locks and will go straight to the exception fixup
 * table; user access methods called in this state do not sleep.
 */
static inline void pagefault_disable(void)
{
        pagefault_disabled_inc();
        /*
         * Make sure the increment is visible before a pagefault
         * can hit.
         */
        barrier();
}

static inline void pagefault_enable(void)
{
        /*
         * Make sure any pending loads/stores are issued before the
         * pagefault handler is re-enabled.
         */
        barrier();
        pagefault_disabled_dec();
}
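
/*
 * Illustrative sketch (not part of this header): wrapping an atomic user
 * access in pagefault_disable()/pagefault_enable() makes the copy fail
 * fast instead of sleeping when the page is not resident ("dst", "usrc"
 * and "len" are hypothetical locals):
 *
 *      pagefault_disable();
 *      ret = __copy_from_user_inatomic(dst, usrc, len);
 *      pagefault_enable();
 *
 * A non-zero ret is then typically handled by retrying with a sleeping
 * copy_from_user() outside the atomic region.
 */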

/*
 * Is the pagefault handler disabled? If so, user access methods will not
 * sleep.
 */
static inline bool pagefault_disabled(void)
{
        return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()). This check is meant for the
 * fault handlers themselves; other users should stick to
 * pagefault_disabled(). Never rely on preempt_disable() to disable the
 * fault handler: with !CONFIG_PREEMPT_COUNT it is a no-op.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
                                  unsigned long n)
{
        return __copy_from_user_inatomic(to, from, n);
}

#endif

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst:   Destination address, in kernel space. This buffer must be @ksize
 *         bytes long.
 * @ksize: Size of @dst struct.
 * @src:   Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace with forward/backward compatibility: if
 * @usize < @ksize, the trailing bytes of @dst are zeroed; if @usize >
 * @ksize, the trailing bytes of @src must all be zero.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
                      size_t usize)
{
        size_t size = min(ksize, usize);
        size_t rest = max(ksize, usize) - size;

        /* Deal with trailing bytes. */
        if (usize < ksize) {
                memset(dst + size, 0, rest);
        } else if (usize > ksize) {
                int ret = check_zeroed_user(src + size, rest);
                if (ret <= 0)
                        return ret ?: -E2BIG;
        }
        /* Copy the interoperable parts of the struct. */
        if (copy_from_user(dst, src, size))
                return -EFAULT;
        return 0;
}
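
/*
 * Illustrative sketch (hypothetical extensible syscall, not part of this
 * header): userspace passes the size of its view of the struct, and
 * copy_struct_from_user() reconciles it with the kernel's:
 *
 *      struct foo karg;        // hypothetical kernel-side struct
 *      int err;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *              return err;
 */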

/*
 * probe_kernel_read(): safely attempt to read @size bytes from kernel
 * address @src into @dst. Returns 0 on success, or -EFAULT if a fault
 * occurs.
 */
extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);
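
/*
 * Illustrative sketch (not part of this header): probing a possibly
 * invalid kernel pointer "ptr" without risking an oops:
 *
 *      struct foo copy;
 *      if (probe_kernel_read(&copy, ptr, sizeof(copy)))
 *              return -EFAULT;
 */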

/*
 * probe_user_read(): safely attempt to read @size bytes from user
 * address @src into the kernel buffer @dst. Returns 0 on success, or
 * -EFAULT if a fault occurs.
 */
extern long probe_user_read(void *dst, const void __user *src, size_t size);
extern long __probe_user_read(void *dst, const void __user *src, size_t size);

/*
 * probe_kernel_write(): safely attempt to write @size bytes from @src to
 * kernel address @dst. Returns 0 on success, or -EFAULT if a fault
 * occurs.
 */
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);

/*
 * probe_user_write(): safely attempt to write @size bytes from the
 * kernel buffer @src to user address @dst. Returns 0 on success, or
 * -EFAULT if a fault occurs.
 */
extern long notrace probe_user_write(void __user *dst, const void *src, size_t size);
extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size);

extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
                                     long count);
extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);

/**
 * probe_kernel_address(): safely attempt to read from a location
 * @addr: address to read from
 * @retval: read into this variable
 *
 * Returns 0 on success, or -EFAULT.
 */
#define probe_kernel_address(addr, retval)              \
        probe_kernel_read(&retval, addr, sizeof(retval))
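
/*
 * Illustrative sketch (not part of this header): probe_kernel_address()
 * takes the destination variable by name, not by address, and sizes the
 * read from it ("pc" is a hypothetical kernel pointer):
 *
 *      unsigned long insn;
 *      if (probe_kernel_address(pc, insn))
 *              return -EFAULT;
 */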

#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
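
/*
 * Illustrative sketch (not part of this header): user_access_begin()
 * amortizes a single access_ok() check over a run of unsafe accessors,
 * which must be paired with user_access_end() on every path ("uptr" is
 * a hypothetical u32 __user pointer):
 *
 *      u32 a, b;
 *      if (!user_access_begin(uptr, 2 * sizeof(u32)))
 *              return -EFAULT;
 *      unsafe_get_user(a, &uptr[0], efault);
 *      unsafe_get_user(b, &uptr[1], efault);
 *      user_access_end();
 *      return a + b;
 * efault:
 *      user_access_end();
 *      return -EFAULT;
 */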

#ifdef CONFIG_HARDENED_USERCOPY
void usercopy_warn(const char *name, const char *detail, bool to_user,
                   unsigned long offset, unsigned long len);
void __noreturn usercopy_abort(const char *name, const char *detail,
                               bool to_user, unsigned long offset,
                               unsigned long len);
#endif

#endif          /* __LINUX_UACCESS_H__ */