#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/asm-compat.h>
#include <asm/processor.h>
#include <asm/page.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.fs)
#define set_fs(val)	(current->thread.fs = (val))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max()	(get_fs().seg)
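
/*
 * Illustrative sketch only (not part of this header): code that wants a
 * routine taking a __user pointer to operate on a kernel buffer has
 * traditionally widened the limit around the call and restored it
 * afterwards.  vfs_read() is just an example caller here; kbuf, len and
 * pos are placeholder names.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = vfs_read(file, (char __user *)kbuf, len, &pos);
 *	set_fs(old_fs);
 */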

#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough gap between
 * user addresses and kernel addresses.
 */
#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) &&		\
	 (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))

#endif

#define access_ok(type, addr, size)		\
	(__chk_user_ptr(addr),			\
	 __access_ok((__force unsigned long)(addr), (size), get_fs()))
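
/*
 * Typical calling pattern (illustrative only, placeholder names): check
 * the whole range once with access_ok(), then use the unchecked __xxx
 * variants for the individual accesses.
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 *	if (__copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */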

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn;
	unsigned long fixup;
};
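
/*
 * The "PPC_LONG 1b,3b" (or "1b,4b") directives emitted by the inline asm
 * helpers below create these entries: insn is the address of the load or
 * store at label 1: that may fault, and fixup is the out-of-line code at
 * label 3: (or 4:) that loads -EFAULT and branches back.
 */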

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
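
/*
 * Typical use (illustrative only; uptr is an int __user * obtained from,
 * say, a syscall argument): both families return 0 on success and -EFAULT
 * on a faulting access, so callers simply propagate the error.
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */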

extern long __put_user_bad(void);

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	" op " %1,0(%2)	# put_user\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
			PPC_LONG_ALIGN "\n"			\
			PPC_LONG "1b,3b\n"			\
		".previous"					\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval)				\
	  __put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	stw %1,0(%2)\n"				\
		"2:	stw %1+1,4(%2)\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	li %0,%3\n"				\
		"	b 3b\n"					\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
			PPC_LONG_ALIGN "\n"			\
			PPC_LONG "1b,4b\n"			\
			PPC_LONG "2b,4b\n"			\
		".previous"					\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __put_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	  case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
	  case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
	  case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
	  case 8: __put_user_asm2(x, ptr, retval); break;	\
	  default: __put_user_bad();				\
	}							\
} while (0)

#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	if (!is_kernel_addr((unsigned long)__pu_addr))		\
		might_fault();					\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	might_fault();							\
	if (access_ok(VERIFY_WRITE, __pu_addr, size))			\
		__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;							\
})

#define __put_user_nosleep(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})


extern long __get_user_bad(void);

#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0(%2)	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz %1,0(%2)\n"			\
		"2:	lwz %1+1,4(%2)\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,4b\n"		\
			PPC_LONG "2b,4b\n"		\
		".previous"				\
		: "=r" (err), "=&r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, ptr, retval);  break;	\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_fault();					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#ifndef __powerpc64__
#define __get_user64_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	long long __gu_val;					\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_fault();					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
	__gu_err;						\
})
#endif /* __powerpc64__ */

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT;					\
	unsigned long __gu_val = 0;					\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);			\
	might_fault();							\
	if (access_ok(VERIFY_READ, __gu_addr, (size)))			\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})

#define __get_user_nosleep(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
	__gu_err;						\
})


/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifndef __powerpc64__

static inline unsigned long copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long over;

	if (access_ok(VERIFY_READ, from, n))
		return __copy_tofrom_user((__force void __user *)to, from, n);
	if ((unsigned long)from < TASK_SIZE) {
		over = (unsigned long)from + n - TASK_SIZE;
		return __copy_tofrom_user((__force void __user *)to, from,
				n - over) + over;
	}
	return n;
}

static inline unsigned long copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	unsigned long over;

	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_tofrom_user(to, (__force void __user *)from, n);
	if ((unsigned long)to < TASK_SIZE) {
		over = (unsigned long)to + n - TASK_SIZE;
		return __copy_tofrom_user(to, (__force void __user *)from,
				n - over) + over;
	}
	return n;
}

#else /* __powerpc64__ */

#define __copy_in_user(to, from, size) \
	__copy_tofrom_user((to), (from), (size))

extern unsigned long copy_from_user(void *to, const void __user *from,
				    unsigned long n);
extern unsigned long copy_to_user(void __user *to, const void *from,
				  unsigned long n);
extern unsigned long copy_in_user(void __user *to, const void __user *from,
				  unsigned long n);

#endif /* __powerpc64__ */
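
/*
 * Return convention (usage sketch, placeholder names): the copy routines
 * return the number of bytes that could not be copied, so zero means
 * success and callers usually turn any non-zero result into -EFAULT.
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 *	...
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 */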

static inline unsigned long __copy_from_user_inatomic(void *to,
		const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}
	return __copy_tofrom_user((__force void __user *)to, from, n);
}

static inline unsigned long __copy_to_user_inatomic(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}
	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}

static inline unsigned long __copy_from_user(void *to,
		const void __user *from, unsigned long size)
{
	might_fault();
	return __copy_from_user_inatomic(to, from, size);
}

static inline unsigned long __copy_to_user(void __user *to,
		const void *from, unsigned long size)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, size);
}

extern unsigned long __clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	might_fault();
	if (likely(access_ok(VERIFY_WRITE, addr, size)))
		return __clear_user(addr, size);
	if ((unsigned long)addr < TASK_SIZE) {
		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
		return __clear_user(addr, size - over) + over;
	}
	return size;
}

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
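
/*
 * Usage sketch (illustrative, placeholder names): strncpy_from_user()
 * returns the length of the copied string, excluding the trailing NUL,
 * on success and -EFAULT on a faulting access; clear_user() returns the
 * number of bytes that could not be zeroed.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (clear_user(ubuf, size))
 *		return -EFAULT;
 */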

#endif  /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif	/* _ARCH_POWERPC_UACCESS_H */