root/arch/openrisc/include/asm/uaccess.h

DEFINITIONS

This source file includes the following definitions:
  1. raw_copy_from_user
  2. raw_copy_to_user
  3. clear_user

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * OpenRISC implementation:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 * et al.
 */

#ifndef __ASM_OPENRISC_UACCESS_H
#define __ASM_OPENRISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/prefetch.h>
#include <linux/string.h>
#include <asm/page.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

/* addr_limit is the maximum accessible address for the task. we misuse
 * the KERNEL_DS and USER_DS values to both assign and compare the
 * addr_limit values through the equally misnamed get/set_fs macros.
 * (see above)
 */

#define KERNEL_DS       (~0UL)

#define USER_DS         (TASK_SIZE)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)        ((a) == (b))

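/*
 * Usage sketch (illustrative only, not part of this header): kernel code
 * that wants a uaccess helper to operate on a kernel buffer temporarily
 * widens addr_limit to KERNEL_DS and restores it afterwards.  The helper
 * name and buffer below are hypothetical.
 *
 *      mm_segment_t old_fs = get_fs();
 *      long err;
 *
 *      set_fs(KERNEL_DS);
 *      err = example_read_config(kernel_buf, len);
 *      set_fs(old_fs);
 */
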
/* Ensure that the range from addr to addr+size is all within the process'
 * address space
 */
#define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs()-size))

/* Ensure that addr is below task's addr_limit */
#define __addr_ok(addr) ((unsigned long) addr < get_fs())

#define access_ok(addr, size)                                           \
({                                                                      \
        unsigned long __ao_addr = (unsigned long)(addr);                \
        unsigned long __ao_size = (unsigned long)(size);                \
        __range_ok(__ao_addr, __ao_size);                               \
})

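/*
 * Usage sketch (hypothetical helper): validate the whole user range once
 * with access_ok(), then use the unchecked __get_user() variant defined
 * below for the individual accesses.
 *
 *      static int example_read_pair(u32 *a, u32 *b, const u32 __user *uptr)
 *      {
 *              if (!access_ok(uptr, 2 * sizeof(u32)))
 *                      return -EFAULT;
 *              if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
 *                      return -EFAULT;
 *              return 0;
 *      }
 */
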
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 */
#define get_user(x, ptr) \
        __get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
        __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

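/*
 * Minimal usage sketch (hypothetical helper): get_user() and put_user()
 * perform the access_ok() check themselves and evaluate to 0 on success
 * or -EFAULT on a faulting or invalid access.
 *
 *      static long example_increment(int __user *uptr)
 *      {
 *              int val;
 *
 *              if (get_user(val, uptr))
 *                      return -EFAULT;
 *              return put_user(val + 1, uptr);
 *      }
 */
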
extern long __put_user_bad(void);

#define __put_user_nocheck(x, ptr, size)                \
({                                                      \
        long __pu_err;                                  \
        __put_user_size((x), (ptr), (size), __pu_err);  \
        __pu_err;                                       \
})

#define __put_user_check(x, ptr, size)                                  \
({                                                                      \
        long __pu_err = -EFAULT;                                        \
        __typeof__(*(ptr)) *__pu_addr = (ptr);                          \
        if (access_ok(__pu_addr, size))                                 \
                __put_user_size((x), __pu_addr, (size), __pu_err);      \
        __pu_err;                                                       \
})

#define __put_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        switch (size) {                                                 \
        case 1: __put_user_asm(x, ptr, retval, "l.sb"); break;          \
        case 2: __put_user_asm(x, ptr, retval, "l.sh"); break;          \
        case 4: __put_user_asm(x, ptr, retval, "l.sw"); break;          \
        case 8: __put_user_asm2(x, ptr, retval); break;                 \
        default: __put_user_bad();                                      \
        }                                                               \
} while (0)

struct __large_struct {
        unsigned long buf[100];
};
#define __m(x) (*(struct __large_struct *)(x))

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
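/*
 * Note on the asm helpers below: each potentially faulting load or store
 * carries a numeric label, and the __ex_table entries pair that
 * instruction's address with the address of its fixup code in .fixup.
 * If the access faults, the fault handler looks the faulting PC up in
 * __ex_table and resumes at the fixup, which sets err to -EFAULT (and,
 * for loads, zeroes the destination register) before jumping back to the
 * label that follows the access.
 */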
#define __put_user_asm(x, addr, err, op)                        \
        __asm__ __volatile__(                                   \
                "1:     "op" 0(%2),%1\n"                        \
                "2:\n"                                          \
                ".section .fixup,\"ax\"\n"                      \
                "3:     l.addi %0,r0,%3\n"                      \
                "       l.j 2b\n"                               \
                "       l.nop\n"                                \
                ".previous\n"                                   \
                ".section __ex_table,\"a\"\n"                   \
                "       .align 2\n"                             \
                "       .long 1b,3b\n"                          \
                ".previous"                                     \
                : "=r"(err)                                     \
                : "r"(x), "r"(addr), "i"(-EFAULT), "0"(err))

#define __put_user_asm2(x, addr, err)                           \
        __asm__ __volatile__(                                   \
                "1:     l.sw 0(%2),%1\n"                        \
                "2:     l.sw 4(%2),%H1\n"                       \
                "3:\n"                                          \
                ".section .fixup,\"ax\"\n"                      \
                "4:     l.addi %0,r0,%3\n"                      \
                "       l.j 3b\n"                               \
                "       l.nop\n"                                \
                ".previous\n"                                   \
                ".section __ex_table,\"a\"\n"                   \
                "       .align 2\n"                             \
                "       .long 1b,4b\n"                          \
                "       .long 2b,4b\n"                          \
                ".previous"                                     \
                : "=r"(err)                                     \
                : "r"(x), "r"(addr), "i"(-EFAULT), "0"(err))

#define __get_user_nocheck(x, ptr, size)                        \
({                                                              \
        long __gu_err, __gu_val;                                \
        __get_user_size(__gu_val, (ptr), (size), __gu_err);     \
        (x) = (__force __typeof__(*(ptr)))__gu_val;             \
        __gu_err;                                               \
})

#define __get_user_check(x, ptr, size)                                  \
({                                                                      \
        long __gu_err = -EFAULT, __gu_val = 0;                          \
        const __typeof__(*(ptr)) * __gu_addr = (ptr);                   \
        if (access_ok(__gu_addr, size))                                 \
                __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                                       \
})

extern long __get_user_bad(void);

#define __get_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        switch (size) {                                                 \
        case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break;         \
        case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break;         \
        case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break;         \
        case 8: __get_user_asm2(x, ptr, retval); break;                 \
        default: (x) = __get_user_bad();                                \
        }                                                               \
} while (0)

#define __get_user_asm(x, addr, err, op)                \
        __asm__ __volatile__(                           \
                "1:     "op" %1,0(%2)\n"                \
                "2:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "3:     l.addi %0,r0,%3\n"              \
                "       l.addi %1,r0,0\n"               \
                "       l.j 2b\n"                       \
                "       l.nop\n"                        \
                ".previous\n"                           \
                ".section __ex_table,\"a\"\n"           \
                "       .align 2\n"                     \
                "       .long 1b,3b\n"                  \
                ".previous"                             \
                : "=r"(err), "=r"(x)                    \
                : "r"(addr), "i"(-EFAULT), "0"(err))

#define __get_user_asm2(x, addr, err)                   \
        __asm__ __volatile__(                           \
                "1:     l.lwz %1,0(%2)\n"               \
                "2:     l.lwz %H1,4(%2)\n"              \
                "3:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "4:     l.addi %0,r0,%3\n"              \
                "       l.addi %1,r0,0\n"               \
                "       l.addi %H1,r0,0\n"              \
                "       l.j 3b\n"                       \
                "       l.nop\n"                        \
                ".previous\n"                           \
                ".section __ex_table,\"a\"\n"           \
                "       .align 2\n"                     \
                "       .long 1b,4b\n"                  \
                "       .long 2b,4b\n"                  \
                ".previous"                             \
                : "=r"(err), "=&r"(x)                   \
                : "r"(addr), "i"(-EFAULT), "0"(err))

/* more complex routines */

extern unsigned long __must_check
__copy_tofrom_user(void *to, const void *from, unsigned long size);
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long size)
{
        return __copy_tofrom_user(to, (__force const void *)from, size);
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long size)
{
        return __copy_tofrom_user((__force void *)to, from, size);
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

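/*
 * Usage sketch (hypothetical caller): the generic copy_from_user() and
 * copy_to_user() wrappers are built on top of these raw routines (see
 * INLINE_COPY_*_USER above) and return the number of bytes that could
 * not be copied, so any non-zero result is treated as a fault.
 *
 *      struct example_args args;
 *
 *      if (copy_from_user(&args, uargs, sizeof(args)))
 *              return -EFAULT;
 */
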
extern unsigned long __clear_user(void __user *addr, unsigned long size);

static inline __must_check unsigned long
clear_user(void __user *addr, unsigned long size)
{
        if (likely(access_ok(addr, size)))
                size = __clear_user(addr, size);
        return size;
}

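/*
 * Usage sketch (hypothetical caller): like the copy routines,
 * clear_user() returns the number of bytes it could not zero, so a
 * non-zero result is usually turned into -EFAULT.
 *
 *      if (clear_user(ubuf, len))
 *              return -EFAULT;
 */
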
#define user_addr_max() \
        (uaccess_kernel() ? ~0UL : TASK_SIZE)

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

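/*
 * Usage sketch (hypothetical caller; semantics described per the generic
 * strncpy_from_user()): the return value is the length of the copied
 * string on success or a negative error such as -EFAULT, and a result
 * equal to the buffer size usually means the user string was truncated.
 *
 *      char name[32];
 *      long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *      if (len < 0)
 *              return len;
 *      if (len == sizeof(name))
 *              return -ENAMETOOLONG;
 */
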
#endif /* __ASM_OPENRISC_UACCESS_H */
