/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/ctl_reg.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(a)	((mm_segment_t) { (a) })

#define KERNEL_DS	MAKE_MM_SEG(0)
#define USER_DS		MAKE_MM_SEG(1)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.mm_segment)

#define set_fs(x) \
({ \
	unsigned long __pto; \
	current->thread.mm_segment = (x); \
	__pto = current->thread.mm_segment.ar4 ? \
		S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
	__ctl_load(__pto, 7, 7); \
})

#define segment_eq(a,b) ((a).ar4 == (b).ar4)
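
/*
 * Illustrative only (not part of the original interface): the usual pattern
 * for temporarily treating kernel addresses as "user" addresses looks
 * roughly like this:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	// ... call code that takes a __user pointer with a kernel address ...
 *	set_fs(old_fs);
 *
 * Note that set_fs() above also reloads control register 7 with either the
 * user or the kernel ASCE, so the uaccess primitives below target the right
 * address space.
 */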

/*
 * On s390 user space and kernel space live in separate address spaces
 * (selected via control register 7, loaded by set_fs() above), so there is
 * no kernel/user address overlap to check for; any range is acceptable here.
 */
static inline int __range_ok(unsigned long addr, unsigned long size)
{
	return 1;
}

#define __access_ok(addr, size) \
({ \
	__chk_user_ptr(addr); \
	__range_ok((unsigned long)(addr), (size)); \
})

#define access_ok(type, addr, size) __access_ok(addr, size)

/*
 * The exception table consists of pairs of 32-bit offsets, relative to the
 * table entry itself: the first yields the address of an instruction that
 * is allowed to fault, and the second the address at which the program
 * should continue. No registers are modified, so it is entirely up to the
 * continuation code to figure out what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	int insn, fixup;
};

static inline unsigned long extable_insn(const struct exception_table_entry *x)
{
	return (unsigned long)&x->insn + x->insn;
}

static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
	return (unsigned long)&x->fixup + x->fixup;
}

#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE
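
/*
 * Illustrative sketch only, not the actual implementation: with the
 * relative-offset entries above (presumably sorted and searched by the
 * arch-provided sort_extable()/search_extable(), hence the defines), a
 * lookup for a faulting address could be done along these lines:
 *
 *	static const struct exception_table_entry *
 *	example_search(const struct exception_table_entry *base, int num,
 *		       unsigned long addr)
 *	{
 *		int first = 0, last = num - 1;
 *
 *		while (first <= last) {
 *			int mid = first + (last - first) / 2;
 *			unsigned long insn = extable_insn(&base[mid]);
 *
 *			if (insn == addr)
 *				return &base[mid];
 *			if (insn < addr)
 *				first = mid + 1;
 *			else
 *				last = mid - 1;
 *		}
 *		return NULL;
 *	}
 *
 * The fixup target for a match would then be extable_fixup(entry).
 */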

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long __must_check __copy_from_user(void *to, const void __user *from,
					    unsigned long n);

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long __must_check __copy_to_user(void __user *to, const void *from,
					  unsigned long n);

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
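
/*
 * Illustrative only (struct foo and fetch_args() are hypothetical): callers
 * of the unchecked __copy_* variants are expected to do the access_ok()
 * check themselves and to treat a non-zero return value (bytes not copied)
 * as -EFAULT:
 *
 *	static int fetch_args(struct foo *dst, const struct foo __user *src)
 *	{
 *		if (!access_ok(VERIFY_READ, src, sizeof(*src)))
 *			return -EFAULT;
 *		if (__copy_from_user(dst, src, sizeof(*dst)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */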

#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES

#define __put_get_user_asm(to, from, size, spec) \
({ \
	register unsigned long __reg0 asm("0") = spec; \
	int __rc; \
 \
	asm volatile( \
		"0: mvcos %1,%3,%2\n" \
		"1: xr %0,%0\n" \
		"2:\n" \
		".pushsection .fixup, \"ax\"\n" \
		"3: lhi %0,%5\n" \
		"   jg 2b\n" \
		".popsection\n" \
		EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
		: "=d" (__rc), "=Q" (*(to)) \
		: "d" (size), "Q" (*(from)), \
		  "d" (__reg0), "K" (-EFAULT) \
		: "cc"); \
	__rc; \
})

#define __put_user_fn(x, ptr, size) __put_get_user_asm(ptr, x, size, 0x810000UL)
#define __get_user_fn(x, ptr, size) __put_get_user_asm(x, ptr, size, 0x81UL)
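
/*
 * Background note (summary only; see the z/Architecture Principles of
 * Operation for the authoritative operand-access-control bit layout): MVCOS
 * moves data between address spaces under control of general register 0.
 * The spec value 0x810000UL tags operand 1 (the destination, the put_user
 * case) and 0x81UL tags operand 2 (the source, the get_user case) as an
 * access to the secondary address space, which set_fs() points at either
 * the user or the kernel ASCE via control register 7.
 */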

#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */

static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	size = __copy_to_user(ptr, x, size);
	return size ? -EFAULT : 0;
}

static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	size = __copy_from_user(x, ptr, size);
	return size ? -EFAULT : 0;
}

#endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr) \
({ \
	__typeof__(*(ptr)) __x = (x); \
	int __pu_err = -EFAULT; \
	__chk_user_ptr(ptr); \
	switch (sizeof (*(ptr))) { \
	case 1: \
	case 2: \
	case 4: \
	case 8: \
		__pu_err = __put_user_fn(&__x, ptr, \
					 sizeof(*(ptr))); \
		break; \
	default: \
		__put_user_bad(); \
		break; \
	} \
	__pu_err; \
})

#define put_user(x, ptr) \
({ \
	might_fault(); \
	__put_user(x, ptr); \
})


int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr) \
({ \
	int __gu_err = -EFAULT; \
	__chk_user_ptr(ptr); \
	switch (sizeof(*(ptr))) { \
	case 1: { \
		unsigned char __x; \
		__gu_err = __get_user_fn(&__x, ptr, \
					 sizeof(*(ptr))); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	}; \
	case 2: { \
		unsigned short __x; \
		__gu_err = __get_user_fn(&__x, ptr, \
					 sizeof(*(ptr))); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	}; \
	case 4: { \
		unsigned int __x; \
		__gu_err = __get_user_fn(&__x, ptr, \
					 sizeof(*(ptr))); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	}; \
	case 8: { \
		unsigned long long __x; \
		__gu_err = __get_user_fn(&__x, ptr, \
					 sizeof(*(ptr))); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	}; \
	default: \
		__get_user_bad(); \
		break; \
	} \
	__gu_err; \
})

#define get_user(x, ptr) \
({ \
	might_fault(); \
	__get_user(x, ptr); \
})

int __get_user_bad(void) __attribute__((noreturn));

#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user
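
/*
 * Illustrative only (set_flag() is a hypothetical helper): the single-value
 * routines derive the access width from the pointer type and return 0 or
 * -EFAULT:
 *
 *	static long set_flag(unsigned int __user *uptr)
 *	{
 *		unsigned int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		val |= 1;
 *		return put_user(val, uptr);
 *	}
 */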

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user(to, from, n);
}

void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
;

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int sz = __compiletime_object_size(to);

	might_fault();
	if (unlikely(sz != -1 && sz < n)) {
		copy_from_user_overflow();
		return n;
	}
	return __copy_from_user(to, from, n);
}
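
/*
 * Illustrative only: since copy_from_user() compares the copy length against
 * the compile-time size of the destination where it can, the usual pattern
 * bounds the length by the kernel buffer first:
 *
 *	char buf[64];
 *
 *	if (len > sizeof(buf))
 *		len = sizeof(buf);
 *	if (copy_from_user(buf, ubuf, len))
 *		return -EFAULT;
 */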

unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	return __copy_in_user(to, from, n);
}

/*
 * Copy a null terminated string from userspace.
 */

long __strncpy_from_user(char *dst, const char __user *src, long count);

static inline long __must_check
strncpy_from_user(char *dst, const char __user *src, long count)
{
	might_fault();
	return __strncpy_from_user(dst, src, count);
}

unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);

static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
{
	might_fault();
	return __strnlen_user(src, n);
}

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, ~0UL)
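
/*
 * Illustrative only (the exact truncation semantics are defined by the
 * implementation of __strncpy_from_user()): a typical bounded string fetch
 * checks for a negative error and for a result that filled the whole buffer:
 *
 *	char name[32];
 *	long len;
 *
 *	len = strncpy_from_user(name, uname, sizeof(name));
 *	if (len < 0)
 *		return len;
 *	if (len >= sizeof(name))
 *		return -ENAMETOOLONG;
 */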

/*
 * Zero Userspace
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}
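
/*
 * Illustrative only: clear_user() follows the same convention as the copy
 * routines and returns the number of bytes that could not be zeroed:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */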

int copy_to_user_real(void __user *dest, void *src, unsigned long count);
void s390_kernel_write(void *dst, const void *src, size_t size);

#endif /* __S390_UACCESS_H */