arch/x86/lib/usercopy_64.c


DEFINITIONS

This source file includes the following definitions:
  1. __clear_user
  2. clear_user
  3. mcsafe_handle_tail
  4. clean_cache_range
  5. arch_wb_cache_pmem
  6. __copy_user_flushcache
  7. __memcpy_flushcache
  8. memcpy_page_flushcache

// SPDX-License-Identifier: GPL-2.0-only
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>

/*
 * Zero Userspace
 */

unsigned long __clear_user(void __user *addr, unsigned long size)
{
        long __d0;
        might_fault();
        /*
         * No memory constraint because it doesn't change any memory gcc
         * knows about.
         */
        stac();
        asm volatile(
                /* zero whole quadwords first; %%rcx holds size / 8 */
                "       testq  %[size8],%[size8]\n"
                "       jz     4f\n"
                "0:     movq $0,(%[dst])\n"
                "       addq   $8,%[dst]\n"
                "       decl %%ecx ; jnz   0b\n"
                /* then zero the remaining 0..7 tail bytes one at a time */
                "4:     movq  %[size1],%%rcx\n"
                "       testl %%ecx,%%ecx\n"
                "       jz     2f\n"
                "1:     movb   $0,(%[dst])\n"
                "       incq   %[dst]\n"
                "       decl %%ecx ; jnz  1b\n"
                "2:\n"
                /* fault fixup: report quadwords + tail bytes not yet zeroed */
                ".section .fixup,\"ax\"\n"
                "3:     lea 0(%[size1],%[size8],8),%[size8]\n"
                "       jmp 2b\n"
                ".previous\n"
                _ASM_EXTABLE_UA(0b, 3b)
                _ASM_EXTABLE_UA(1b, 2b)
                : [size8] "=&c"(size), [dst] "=&D" (__d0)
                : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr));
        clac();
        return size;
}
EXPORT_SYMBOL(__clear_user);

unsigned long clear_user(void __user *to, unsigned long n)
{
        if (access_ok(to, n))
                return __clear_user(to, n);
        return n;
}
EXPORT_SYMBOL(clear_user);
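
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * clear_user() performs the access_ok() check itself and returns the
 * number of bytes that could NOT be zeroed, so any non-zero return is
 * reported to userspace as -EFAULT.
 */
#if 0 /* usage sketch only */
static long example_zero_ubuf(void __user *ubuf, unsigned long len)
{
        if (clear_user(ubuf, len))
                return -EFAULT;
        return 0;
}
#endif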

/*
 * Similar to copy_user_handle_tail, probe for the write fault point,
 * but reuse __memcpy_mcsafe in case a new read error is encountered.
 * clac() is handled in _copy_to_iter_mcsafe().
 */
__visible notrace unsigned long
mcsafe_handle_tail(char *to, char *from, unsigned len)
{
        for (; len; --len, to++, from++) {
                /*
                 * Call the assembly routine back directly since
                 * memcpy_mcsafe() may silently fallback to memcpy.
                 */
                unsigned long rem = __memcpy_mcsafe(to, from, 1);

                if (rem)
                        break;
        }
        return len;
}
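
/*
 * Illustrative sketch (hypothetical caller, not the actual kernel call
 * site): the pattern this helper supports is "bulk copy first, then walk
 * the faulted tail byte by byte", so the return value pinpoints exactly
 * how many bytes remain uncopied after a poisoned-memory read.
 */
#if 0 /* usage sketch only */
static unsigned long example_mcsafe_copy(char *to, char *from, unsigned len)
{
        unsigned long rem = __memcpy_mcsafe(to, from, len);

        if (rem)
                rem = mcsafe_handle_tail(to + len - rem, from + len - rem,
                                         rem);
        return rem;
}
#endif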

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr:       virtual start address
 * @size:       number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
        u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
        unsigned long clflush_mask = x86_clflush_size - 1;
        void *vend = addr + size;
        void *p;

        for (p = (void *)((unsigned long)addr & ~clflush_mask);
             p < vend; p += x86_clflush_size)
                clwb(p);
}

void arch_wb_cache_pmem(void *addr, size_t size)
{
        clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
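
/*
 * Worked example of the rounding above (illustrative only): with a
 * 64-byte cache line, clflush_mask is 63, so a write-back of
 * [0x1008, 0x1008 + 100) starts at 0x1000 (the start address rounded
 * down to a line boundary) and issues CLWB for the lines at 0x1000 and
 * 0x1040, covering every byte of the requested range.
 */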

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
        unsigned long flushed, dest = (unsigned long) dst;
        long rc = __copy_user_nocache(dst, src, size, 0);

        /*
         * __copy_user_nocache() uses non-temporal stores for the bulk
         * of the transfer, but we need to manually flush if the
         * transfer is unaligned. A cached memory copy is used when
         * destination or size is not naturally aligned. That is:
         *   - Require 8-byte alignment when size is 8 bytes or larger.
         *   - Require 4-byte alignment when size is 4 bytes.
         */
        if (size < 8) {
                /* a short unaligned copy may straddle two cache lines */
                if (!IS_ALIGNED(dest, 4) || size != 4)
                        clean_cache_range(dst, size);
        } else {
                if (!IS_ALIGNED(dest, 8)) {
                        dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
                        clean_cache_range(dst, 1);
                }

                flushed = dest - (unsigned long) dst;
                if (size > flushed && !IS_ALIGNED(size - flushed, 8))
                        clean_cache_range(dst + size - 1, 1);
        }

        return rc;
}
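
/*
 * Flush behaviour of __copy_user_flushcache() by example (offsets are
 * within a 64-byte cache line, illustrative only):
 *   - dst 8-byte aligned, size 256: handled entirely by non-temporal
 *     stores, no manual write-back needed.
 *   - dst at offset 4, size 4: a naturally aligned 4-byte store, no
 *     write-back needed.
 *   - dst at offset 2, size 4: unaligned, so the cached copy is written
 *     back with clean_cache_range().
 *   - dst 8-byte aligned, size 11: the first 8 bytes go out with
 *     non-temporal stores; the 3 trailing bytes use cached stores, so
 *     the last line is written back.
 */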

void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
        unsigned long dest = (unsigned long) _dst;
        unsigned long source = (unsigned long) _src;

        /* cache copy and flush to align dest */
        if (!IS_ALIGNED(dest, 8)) {
                unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);

                memcpy((void *) dest, (void *) source, len);
                clean_cache_range((void *) dest, len);
                dest += len;
                source += len;
                size -= len;
                if (!size)
                        return;
        }

        /* 4x8 movnti loop */
        while (size >= 32) {
                asm("movq    (%0), %%r8\n"
                    "movq   8(%0), %%r9\n"
                    "movq  16(%0), %%r10\n"
                    "movq  24(%0), %%r11\n"
                    "movnti  %%r8,   (%1)\n"
                    "movnti  %%r9,  8(%1)\n"
                    "movnti %%r10, 16(%1)\n"
                    "movnti %%r11, 24(%1)\n"
                    :: "r" (source), "r" (dest)
                    : "memory", "r8", "r9", "r10", "r11");
                dest += 32;
                source += 32;
                size -= 32;
        }

        /* 1x8 movnti loop */
        while (size >= 8) {
                asm("movq    (%0), %%r8\n"
                    "movnti  %%r8,   (%1)\n"
                    :: "r" (source), "r" (dest)
                    : "memory", "r8");
                dest += 8;
                source += 8;
                size -= 8;
        }

        /* 1x4 movnti loop */
        while (size >= 4) {
                asm("movl    (%0), %%r8d\n"
                    "movnti  %%r8d,   (%1)\n"
                    :: "r" (source), "r" (dest)
                    : "memory", "r8");
                dest += 4;
                source += 4;
                size -= 4;
        }

        /* cache copy for remaining bytes */
        if (size) {
                memcpy((void *) dest, (void *) source, size);
                clean_cache_range((void *) dest, size);
        }
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);
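
/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): persistent-memory writers pair memcpy_flushcache() with a
 * write barrier so the flushed and non-temporal stores are globally
 * visible before the write is considered durable.
 */
#if 0 /* usage sketch only */
static void example_pmem_write(void *pmem_dst, const void *src, size_t len)
{
        memcpy_flushcache(pmem_dst, src, len);
        wmb();  /* order the stores before declaring the data durable */
}
#endif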

void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
                size_t len)
{
        char *from = kmap_atomic(page);

        memcpy_flushcache(to, from + offset, len);
        kunmap_atomic(from);
}
#endif /* CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE */
