arch/powerpc/include/asm/cacheflush.h

DEFINITIONS

This source file includes the following definitions:
  1. flush_cache_vmap
  2. flush_cache_vmap
  3. flush_dcache_range
  4. clean_dcache_range
  5. invalidate_dcache_range

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 */
#ifndef _ASM_POWERPC_CACHEFLUSH_H
#define _ASM_POWERPC_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
#include <asm/cputable.h>

/*
 * No cache flushing is required when address mappings are changed,
 * because the caches on PowerPCs are physically addressed.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_icache_page(vma, page)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)
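
/*
 * Illustrative sketch, not part of this header: because the macros above
 * expand to empty statements, generic MM code that flushes caches around a
 * mapping change costs nothing on powerpc. The protection-change walk below
 * is a hypothetical caller, shown only to make the no-op expansion concrete
 * (flush_tlb_range() comes from <asm/tlbflush.h>).
 */
#if 0	/* example only */
static void example_change_protection(struct vm_area_struct *vma,
				      unsigned long start, unsigned long end)
{
	flush_cache_range(vma, start, end);	/* expands to do { } while (0) */
	/* ... rewrite the PTEs for [start, end) here ... */
	flush_tlb_range(vma, start, end);	/* TLBs still need maintenance */
}
#endif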

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Book3s has no ptesync after setting a pte, so without this ptesync it's
 * possible for a kernel virtual mapping access to return a spurious fault
 * if it's accessed right after the pte is set. The page fault handler does
 * not expect this type of fault. flush_cache_vmap is not exactly the right
 * place to put this, but it seems to work well enough.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	asm volatile("ptesync" ::: "memory");
}
#else
static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
#endif
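
/*
 * Illustrative sketch, not part of this header: a vmap-style mapping path
 * installs kernel PTEs and then calls flush_cache_vmap(), so that on
 * Book3s-64 the ptesync above orders the PTE stores before any access
 * through the new mapping. example_map_kernel_range() and
 * example_install_ptes() are hypothetical names.
 */
#if 0	/* example only */
static int example_map_kernel_range(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages)
{
	int ret;

	ret = example_install_ptes(start, start + size, prot, pages);
	flush_cache_vmap(start, start + size);	/* ptesync on Book3s-64, no-op elsewhere */
	return ret;
}
#endif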

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

void flush_icache_range(unsigned long start, unsigned long stop);
extern void flush_icache_user_range(struct vm_area_struct *vma,
				    struct page *page, unsigned long addr,
				    int len);
extern void flush_dcache_icache_page(struct page *page);
void __flush_dcache_icache(void *page);
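
/*
 * Illustrative sketch, not part of this header: a typical user of
 * flush_icache_range() is code patching - store the new instruction, then
 * flush so the modified d-cache line reaches memory and the stale i-cache
 * line is discarded before the instruction can be executed.
 * example_patch_u32() is a hypothetical helper.
 */
#if 0	/* example only */
static void example_patch_u32(u32 *addr, u32 insn)
{
	*addr = insn;				/* store the new instruction word */
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + sizeof(insn));
}
#endif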

/**
 * flush_dcache_range(): Write any modified data cache blocks out to memory and
 * invalidate them. Does not invalidate the corresponding instruction cache
 * blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
static inline void flush_dcache_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	if (IS_ENABLED(CONFIG_PPC64)) {
		mb();	/* sync */
		isync();
	}

	for (i = 0; i < size >> shift; i++, addr += bytes)
		dcbf(addr);
	mb();	/* sync */

	if (IS_ENABLED(CONFIG_PPC64))
		isync();
}
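
/*
 * Illustrative sketch, not part of this header: on a platform without
 * cache-coherent DMA, a driver flushes a buffer it has just written before
 * pointing a device at it, so the device sees the data rather than whatever
 * is in memory behind the dirty cache lines. example_kick_device() is a
 * hypothetical MMIO helper.
 */
#if 0	/* example only */
static void example_send_to_device(void *buf, size_t len)
{
	/* CPU has filled buf through its cacheable mapping ... */
	flush_dcache_range((unsigned long)buf, (unsigned long)buf + len);
	example_kick_device(__pa(buf), len);	/* hand the physical address to the device */
}
#endif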

/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 */
static inline void clean_dcache_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		dcbst(addr);
	mb();	/* sync */
}
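
/*
 * Illustrative sketch, not part of this header: clean_dcache_range() fits
 * when the CPU only needs its writes made visible (e.g. a descriptor handed
 * to hardware) and will keep reading the same lines, so invalidating them
 * would be wasted work. The descriptor layout below is hypothetical.
 */
#if 0	/* example only */
struct example_desc { u32 addr; u32 len; u32 flags; };

static void example_post_desc(struct example_desc *d, u32 addr, u32 len)
{
	d->addr = addr;
	d->len = len;
	d->flags = 1;				/* mark as owned by hardware */
	clean_dcache_range((unsigned long)d, (unsigned long)(d + 1));
}
#endif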

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 */
static inline void invalidate_dcache_range(unsigned long start,
					   unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		dcbi(addr);
	mb();	/* sync */
}
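
/*
 * Illustrative sketch, not part of this header: the comment above describes
 * the 8xx/CPM case - a non-snooping DMA engine has written into memory, so
 * the CPU must discard its possibly stale cached copy before reading the
 * buffer. example_receive_from_cpm() is a hypothetical receive path.
 */
#if 0	/* example only */
static void example_receive_from_cpm(void *buf, size_t len)
{
	invalidate_dcache_range((unsigned long)buf, (unsigned long)buf + len);
	/* ... now safe to parse the freshly DMA'd data in buf ... */
}
#endif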

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
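
/*
 * Illustrative sketch, not part of this header: access_process_vm()-style
 * code uses copy_to_user_page() when it modifies another task's page (for
 * instance a debugger planting a breakpoint), so the trailing
 * flush_icache_user_range() keeps the i-cache coherent with the new text.
 * The kernel-mapped destination pointer is assumed to come from the caller;
 * example_poke_text() is hypothetical.
 */
#if 0	/* example only */
static void example_poke_text(struct vm_area_struct *vma, struct page *page,
			      unsigned long vaddr, void *kaddr,
			      const void *insn, int len)
{
	copy_to_user_page(vma, page, vaddr, kaddr, insn, len);
}
#endif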

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_CACHEFLUSH_H */
