root/arch/csky/mm/tlb.c


DEFINITIONS

This source file includes the following definitions.
  1. flush_tlb_all
  2. flush_tlb_mm
  3. flush_tlb_range
  4. flush_tlb_kernel_range
  5. flush_tlb_page
  6. flush_tlb_one

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/setup.h>

/*
 * One C-SKY MMU TLB entry contains two PFN/page entries, i.e.:
 * 1 VPN -> 2 PFN
 */
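/*
 * Masking with PAGE_MASK << 1 also clears the low VPN bit, aligning
 * addresses to the two-page granule covered by a single TLB entry.
 */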
#define TLB_ENTRY_SIZE          (PAGE_SIZE * 2)
#define TLB_ENTRY_SIZE_MASK     (PAGE_MASK << 1)

void flush_tlb_all(void)
{
        tlb_invalid_all();
}

void flush_tlb_mm(struct mm_struct *mm)
{
#ifdef CONFIG_CPU_HAS_TLBI
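        /* A single tlbi.asids drops every entry tagged with this mm's ASID. */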
        asm volatile("tlbi.asids %0"::"r"(cpu_asid(mm)));
#else
        tlb_invalid_all();
#endif
}

/*
 * The MMU operation registers can only invalidate TLB entries in the
 * jTLB, so we must change the ASID field to invalidate the I-uTLB &
 * D-uTLB as well.
 */
#ifndef CONFIG_CPU_HAS_TLBI
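/*
 * Writing a different ASID into ENTRYHI is what flushes the uTLBs:
 * when the old and new ASIDs are equal, write oldpid + 1 first to
 * force a change before restoring the original value.
 */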
#define restore_asid_inv_utlb(oldpid, newpid) \
do { \
        if (oldpid == newpid) \
                write_mmu_entryhi(oldpid + 1); \
        write_mmu_entryhi(oldpid); \
} while (0)
#endif

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                        unsigned long end)
{
        unsigned long newpid = cpu_asid(vma->vm_mm);

        start &= TLB_ENTRY_SIZE_MASK;
        end   += TLB_ENTRY_SIZE - 1;
        end   &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
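        /*
         * tlbi.vas invalidates by virtual address + ASID; step in
         * two-page strides since each jTLB entry maps a VPN pair.
         */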
        while (start < end) {
                asm volatile("tlbi.vas %0"::"r"(start | newpid));
                start += 2*PAGE_SIZE;
        }
        sync_is();
#else
        {
        unsigned long flags, oldpid;

        local_irq_save(flags);
        oldpid = read_mmu_entryhi() & ASID_MASK;
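        /*
         * Probe the jTLB for each VPN pair in the range; a negative
         * index from the probe means no matching entry exists.
         */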
        while (start < end) {
                int idx;

                write_mmu_entryhi(start | newpid);
                start += 2*PAGE_SIZE;
                tlb_probe();
                idx = read_mmu_index();
                if (idx >= 0)
                        tlb_invalid_indexed();
        }
        restore_asid_inv_utlb(oldpid, newpid);
        local_irq_restore(flags);
        }
#endif
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        start &= TLB_ENTRY_SIZE_MASK;
        end   += TLB_ENTRY_SIZE - 1;
        end   &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
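        /*
         * tlbi.vaas invalidates the matching entry in every ASID;
         * kernel mappings are not tied to one address space.
         */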
        while (start < end) {
                asm volatile("tlbi.vaas %0"::"r"(start));
                start += 2*PAGE_SIZE;
        }
        sync_is();
#else
        {
        unsigned long flags, oldpid;

        local_irq_save(flags);
        oldpid = read_mmu_entryhi() & ASID_MASK;
        while (start < end) {
                int idx;

                write_mmu_entryhi(start | oldpid);
                start += 2*PAGE_SIZE;
                tlb_probe();
                idx = read_mmu_index();
                if (idx >= 0)
                        tlb_invalid_indexed();
        }
        restore_asid_inv_utlb(oldpid, oldpid);
        local_irq_restore(flags);
        }
#endif
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        int newpid = cpu_asid(vma->vm_mm);

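        /* Align down to the VPN-pair boundary covered by one TLB entry. */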
        addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
        asm volatile("tlbi.vas %0"::"r"(addr | newpid));
        sync_is();
#else
        {
        int oldpid, idx;
        unsigned long flags;

        local_irq_save(flags);
        oldpid = read_mmu_entryhi() & ASID_MASK;
        write_mmu_entryhi(addr | newpid);
        tlb_probe();
        idx = read_mmu_index();
        if (idx >= 0)
                tlb_invalid_indexed();

        restore_asid_inv_utlb(oldpid, newpid);
        local_irq_restore(flags);
        }
#endif
}

void flush_tlb_one(unsigned long addr)
{
        addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
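        /* There is no mm context here, so invalidate the address in all ASIDs. */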
        asm volatile("tlbi.vaas %0"::"r"(addr));
        sync_is();
#else
        {
        int oldpid, idx;
        unsigned long flags;

        local_irq_save(flags);
        oldpid = read_mmu_entryhi() & ASID_MASK;
        write_mmu_entryhi(addr | oldpid);
        tlb_probe();
        idx = read_mmu_index();
        if (idx >= 0)
                tlb_invalid_indexed();

        restore_asid_inv_utlb(oldpid, oldpid);
        local_irq_restore(flags);
        }
#endif
}
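/* Exported for use by loadable modules. */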
EXPORT_SYMBOL(flush_tlb_one);
