arch/nios2/mm/cacheflush.c


DEFINITIONS

This source file includes the following definitions:
  1. __flush_dcache
  2. __invalidate_dcache
  3. __flush_icache
  4. flush_aliases
  5. flush_cache_all
  6. flush_cache_mm
  7. flush_cache_dup_mm
  8. flush_icache_range
  9. flush_dcache_range
  10. invalidate_dcache_range
  11. flush_cache_range
  12. flush_icache_page
  13. flush_cache_page
  14. __flush_dcache_page
  15. flush_dcache_page
  16. update_mmu_cache
  17. copy_user_page
  18. clear_user_page
  19. copy_from_user_page
  20. copy_to_user_page

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009, Wind River Systems Inc
 * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>

#include <asm/cacheflush.h>
#include <asm/cpuinfo.h>

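/*
 * Write back and invalidate the data cache lines covering [start, end).
 * The Nios II flushd instruction selects a line by the index bits of its
 * address operand and ignores the tag, so sweeping more than one
 * dcache-size worth of addresses is redundant; the loop is clamped to at
 * most cpuinfo.dcache_size bytes.
 */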
static void __flush_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	if (end > start + cpuinfo.dcache_size)
		end = start + cpuinfo.dcache_size;

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ ("   flushd 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}

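/*
 * Discard the data cache lines covering [start, end) without writing
 * them back.  initda matches on the tag, so unlike __flush_dcache() the
 * range must not be clamped: every line in the caller's range has to be
 * hit.  Aligning start down and end up means partial lines at either
 * edge are discarded whole.
 */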
static void __invalidate_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ ("   initda 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}

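/*
 * Invalidate the instruction cache lines covering [start, end).  flushi
 * indexes the icache by address, so the range is clamped to one
 * icache-size sweep; the trailing flushp discards any already-fetched
 * instructions from the pipeline.
 */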
static void __flush_icache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.icache_line_size - 1);
	end += (cpuinfo.icache_line_size - 1);
	end &= ~(cpuinfo.icache_line_size - 1);

	if (end > start + cpuinfo.icache_size)
		end = start + cpuinfo.icache_size;

	for (addr = start; addr < end; addr += cpuinfo.icache_line_size) {
		__asm__ __volatile__ ("   flushi %0\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
	__asm__ __volatile__(" flushp\n");
}

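/*
 * With a virtually indexed cache, different user mappings of the same
 * physical page can occupy distinct cache lines.  Walk every shared user
 * mapping of @page in @mapping and flush the alias belonging to the
 * current mm, keeping all views of the page coherent.
 */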
static void flush_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	pgoff = page->index;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;

		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset,
			page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

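/*
 * Because flushd and flushi index the caches by address, sweeping the
 * address range [0, cache size) touches every line once and therefore
 * flushes the entire cache, regardless of what is mapped there.
 */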
void flush_cache_all(void)
{
	__flush_dcache(0, cpuinfo.dcache_size);
	__flush_icache(0, cpuinfo.icache_size);
}

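/*
 * The cache maintenance instructions cannot target a single address
 * space, so the mm-wide hooks (and fork's dup variant below) fall back
 * to flushing everything.
 */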
void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

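/*
 * Make newly written instructions in [start, end) visible to the fetch
 * unit: write the data back out of the dcache first, then invalidate
 * the stale icache lines.
 */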
void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_dcache(start, end);
	__flush_icache(start, end);
}

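/*
 * Write back the data cache over [start, end); the icache over the same
 * range is invalidated as well.  Exported so drivers can, for example,
 * make a buffer visible to a bus master before starting DMA.  A sketch,
 * with fill_buffer() and start_device_dma() as hypothetical driver code:
 *
 *	fill_buffer(buf, len);
 *	flush_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 *	start_device_dma(dev, virt_to_phys(buf), len);
 */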
void flush_dcache_range(unsigned long start, unsigned long end)
{
	__flush_dcache(start, end);
	__flush_icache(start, end);
}
EXPORT_SYMBOL(flush_dcache_range);

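/*
 * Drop, without writeback, any cached copies of [start, end), e.g.
 * before reading a buffer that a device has DMAed into.  Callers should
 * keep the range cache-line aligned: the edge lines are discarded
 * whole, so unrelated data sharing them would be lost.
 */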
void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	__invalidate_dcache(start, end);
}
EXPORT_SYMBOL(invalidate_dcache_range);

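/*
 * Flush a user address range; the icache only needs attention when the
 * VMA is executable, or unknown (vma == NULL).
 */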
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__flush_dcache(start, end);
	if (vma == NULL || (vma->vm_flags & VM_EXEC))
		__flush_icache(start, end);
}

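/*
 * Flush one page's worth of cache through its kernel mapping so the
 * instructions it now contains can be fetched safely.
 */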
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + PAGE_SIZE;

	__flush_dcache(start, end);
	__flush_icache(start, end);
}

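/*
 * Flush one page at its user virtual address.  Since the cache ops
 * index by address, the user address selects the right lines even
 * though the page contents are never touched here.
 */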
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
			unsigned long pfn)
{
	unsigned long start = vmaddr;
	unsigned long end = start + PAGE_SIZE;

	__flush_dcache(start, end);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache(start, end);
}

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	unsigned long start = (unsigned long)page_address(page);

	__flush_dcache(start, start + PAGE_SIZE);
}

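/*
 * Lazy dcache flushing: if the page belongs to a file but currently has
 * no user mappings, just clear PG_dcache_clean and let update_mmu_cache()
 * perform the flush when a mapping is actually set up.  Otherwise flush
 * the kernel view now and bring any user aliases in line.
 */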
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping_file(page);

	/* Defer the flush if the page is not mapped into user space. */
	if (mapping && !mapping_mapped(mapping)) {
		clear_bit(PG_dcache_clean, &page->flags);
	} else {
		__flush_dcache_page(mapping, page);
		if (mapping) {
			unsigned long start = (unsigned long)page_address(page);
			flush_aliases(mapping, page);
			flush_icache_range(start, start + PAGE_SIZE);
		}
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

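/*
 * Called after a PTE for @address has been installed: preload the TLB
 * entry, complete any flush deferred by flush_dcache_page(), and keep
 * user aliases and the icache coherent for the new mapping.
 */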
void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	unsigned long pfn = pte_pfn(pte);
	struct page *page;
	struct address_space *mapping;

	reload_tlb_page(vma, address, pte);

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping_file(page);
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (mapping) {
		flush_aliases(mapping, page);
		if (vma->vm_flags & VM_EXEC)
			flush_icache_page(vma, page);
	}
}

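/*
 * Copy a page that is also mapped in user space: flush the user alias
 * at @vaddr first so no stale lines resurface later, then flush the
 * kernel destination mapping after the copy.
 */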
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *to)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	__flush_icache(vaddr, vaddr + PAGE_SIZE);
	copy_page(vto, vfrom);
	__flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
	__flush_icache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
}

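/*
 * Zero a page destined for user space, flushing the user alias before
 * and the kernel mapping after, mirroring copy_user_page() above.
 */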
void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	__flush_icache(vaddr, vaddr + PAGE_SIZE);
	clear_page(addr);
	__flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
	__flush_icache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
}

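/*
 * Read from a user page via its kernel mapping (ptrace, core dumps):
 * flush the user alias first so @src is current, then copy.
 */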
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			unsigned long user_vaddr,
			void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache((unsigned long)src, (unsigned long)src + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)src, (unsigned long)src + len);
}

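/*
 * Write into a user page via its kernel mapping (ptrace breakpoints and
 * friends): flush the user alias, copy, then write @dst back out of the
 * dcache (and invalidate the icache for executable mappings) so the
 * user side sees the new contents.
 */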
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
			unsigned long user_vaddr,
			void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache((unsigned long)dst, (unsigned long)dst + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)dst, (unsigned long)dst + len);
}
