root/arch/xtensa/include/asm/tlbflush.h

DEFINITIONS

This source file includes the following definitions.
  1. itlb_probe
  2. dtlb_probe
  3. invalidate_itlb_entry
  4. invalidate_dtlb_entry
  5. invalidate_itlb_entry_no_isync
  6. invalidate_dtlb_entry_no_isync
  7. set_itlbcfg_register
  8. set_dtlbcfg_register
  9. set_ptevaddr_register
  10. read_ptevaddr_register
  11. write_dtlb_entry
  12. write_itlb_entry
  13. invalidate_page_directory
  14. invalidate_itlb_mapping
  15. invalidate_dtlb_mapping
  16. read_dtlb_virtual
  17. read_dtlb_translation
  18. read_itlb_virtual
  19. read_itlb_translation

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2013 Tensilica Inc.
 */

#ifndef _XTENSA_TLBFLUSH_H
#define _XTENSA_TLBFLUSH_H

#include <linux/stringify.h>
#include <asm/processor.h>

/* DTLB way reserved for the page-table (PGD) mapping. */
#define DTLB_WAY_PGD    7

/* Number of auto-refill ways in the ITLB/DTLB. */
#define ITLB_ARF_WAYS   4
#define DTLB_ARF_WAYS   4

/* Bit set in the pitlb/pdtlb probe result when the lookup hits. */
#define ITLB_HIT_BIT    3
#define DTLB_HIT_BIT    4

#ifndef __ASSEMBLY__

/* TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes a single page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *
 * A short usage sketch follows the declarations below.
 */

void local_flush_tlb_all(void);
void local_flush_tlb_mm(struct mm_struct *mm);
void local_flush_tlb_page(struct vm_area_struct *vma,
                unsigned long page);
void local_flush_tlb_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end);
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);

#ifdef CONFIG_SMP

void flush_tlb_all(void);
void flush_tlb_mm(struct mm_struct *);
void flush_tlb_page(struct vm_area_struct *, unsigned long);
void flush_tlb_range(struct vm_area_struct *, unsigned long,
                unsigned long);
void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#else /* !CONFIG_SMP */

#define flush_tlb_all()                    local_flush_tlb_all()
#define flush_tlb_mm(mm)                   local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, page)          local_flush_tlb_page(vma, page)
#define flush_tlb_range(vma, vmaddr, end)  local_flush_tlb_range(vma, vmaddr, \
                                                                 end)
#define flush_tlb_kernel_range(start, end) local_flush_tlb_kernel_range(start, \
                                                                        end)

#endif /* CONFIG_SMP */

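/*
 * Usage sketch (illustrative only, not part of the original header): a
 * caller that has just rewritten the PTEs covering [start, end) in 'vma'
 * flushes exactly that scope; the single-page and whole-mm variants follow
 * the same pattern.  The helper name below is made up for this example.
 */
static inline void example_flush_after_pte_update(struct vm_area_struct *vma,
                                                  unsigned long start,
                                                  unsigned long end)
{
        /* Drop any stale translations for the updated range. */
        flush_tlb_range(vma, start, end);
}
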
/* TLB operations. */

static inline unsigned long itlb_probe(unsigned long addr)
{
        unsigned long tmp;
        __asm__ __volatile__("pitlb  %0, %1\n\t" : "=a" (tmp) : "a" (addr));
        return tmp;
}

static inline unsigned long dtlb_probe(unsigned long addr)
{
        unsigned long tmp;
        __asm__ __volatile__("pdtlb  %0, %1\n\t" : "=a" (tmp) : "a" (addr));
        return tmp;
}

static inline void invalidate_itlb_entry (unsigned long probe)
{
        __asm__ __volatile__("iitlb  %0; isync\n\t" : : "a" (probe));
}

static inline void invalidate_dtlb_entry (unsigned long probe)
{
        __asm__ __volatile__("idtlb  %0; dsync\n\t" : : "a" (probe));
}

/* Use the .._no_isync functions with caution.  They are handy for bulk
 * invalidates where the caller issues a single 'isync' after the whole
 * batch, since 'isync' can be relatively expensive on some Xtensa
 * implementations.  A sketch of that pattern follows the two helpers
 * below.
 */
static inline void invalidate_itlb_entry_no_isync (unsigned entry)
{
        /* Caller must follow up with 'isync'. */
        __asm__ __volatile__ ("iitlb  %0\n" : : "a" (entry) );
}

static inline void invalidate_dtlb_entry_no_isync (unsigned entry)
{
        /* Caller must follow up with 'isync'. */
        __asm__ __volatile__ ("idtlb  %0\n" : : "a" (entry) );
}

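/*
 * Illustrative sketch only (not part of the original header): invalidate a
 * caller-supplied batch of ITLB entries and pay for one 'isync' at the end,
 * which is the pattern the comment above describes.  The helper name and
 * the idea of passing entry values around are assumptions made for this
 * example.
 */
static inline void example_invalidate_itlb_batch(const unsigned long *entries,
                                                 int count)
{
        int i;

        for (i = 0; i < count; i++)
                invalidate_itlb_entry_no_isync(entries[i]);

        /* A single isync covers the whole batch. */
        __asm__ __volatile__("isync\n\t");
}
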
static inline void set_itlbcfg_register (unsigned long val)
{
        __asm__ __volatile__("wsr  %0, itlbcfg\n\t" "isync\n\t"
                             : : "a" (val));
}

static inline void set_dtlbcfg_register (unsigned long val)
{
        __asm__ __volatile__("wsr  %0, dtlbcfg; dsync\n\t"
                             : : "a" (val));
}

static inline void set_ptevaddr_register (unsigned long val)
{
        __asm__ __volatile__(" wsr  %0, ptevaddr; isync\n"
                             : : "a" (val));
}

static inline unsigned long read_ptevaddr_register (void)
{
        unsigned long tmp;
        __asm__ __volatile__("rsr  %0, ptevaddr\n\t" : "=a" (tmp));
        return tmp;
}

static inline void write_dtlb_entry (pte_t entry, int way)
{
        __asm__ __volatile__("wdtlb  %1, %0; dsync\n\t"
                             : : "r" (way), "r" (entry) );
}

static inline void write_itlb_entry (pte_t entry, int way)
{
        __asm__ __volatile__("witlb  %1, %0; isync\n\t"
                             : : "r" (way), "r" (entry) );
}

static inline void invalidate_page_directory (void)
{
        invalidate_dtlb_entry (DTLB_WAY_PGD);
        invalidate_dtlb_entry (DTLB_WAY_PGD+1);
        invalidate_dtlb_entry (DTLB_WAY_PGD+2);
}

static inline void invalidate_itlb_mapping (unsigned address)
{
        unsigned long tlb_entry;
        if (((tlb_entry = itlb_probe(address)) & (1 << ITLB_HIT_BIT)) != 0)
                invalidate_itlb_entry(tlb_entry);
}

static inline void invalidate_dtlb_mapping (unsigned address)
{
        unsigned long tlb_entry;
        if (((tlb_entry = dtlb_probe(address)) & (1 << DTLB_HIT_BIT)) != 0)
                invalidate_dtlb_entry(tlb_entry);
}

/*
 * DO NOT USE THESE FUNCTIONS.  These instructions aren't part of the Xtensa
 * ISA and exist only for test purposes.  You may find them helpful for
 * MMU debugging, however.
 *
 * 'at' is the unmodified input register
 * 'as' is the output register, as follows (specific to the Linux config):
 *
 *      as[31..12] contain the virtual address
 *      as[11..08] are meaningless
 *      as[07..00] contain the asid
 *
 * A small decoding sketch follows the four helpers below.
 */

static inline unsigned long read_dtlb_virtual (int way)
{
        unsigned long tmp;
        __asm__ __volatile__("rdtlb0  %0, %1\n\t" : "=a" (tmp), "+a" (way));
        return tmp;
}

static inline unsigned long read_dtlb_translation (int way)
{
        unsigned long tmp;
        __asm__ __volatile__("rdtlb1  %0, %1\n\t" : "=a" (tmp), "+a" (way));
        return tmp;
}

static inline unsigned long read_itlb_virtual (int way)
{
        unsigned long tmp;
        __asm__ __volatile__("ritlb0  %0, %1\n\t" : "=a" (tmp), "+a" (way));
        return tmp;
}

static inline unsigned long read_itlb_translation (int way)
{
        unsigned long tmp;
        __asm__ __volatile__("ritlb1  %0, %1\n\t" : "=a" (tmp), "+a" (way));
        return tmp;
}

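/*
 * Illustrative sketch only (not part of the original header): split a
 * read_dtlb_virtual()/read_itlb_virtual() result into the fields listed in
 * the comment above.  The masks simply follow that description; the helper
 * names are made up for this example.
 */
static inline unsigned long example_tlb_entry_vaddr(unsigned long as)
{
        return as & 0xfffff000;         /* as[31..12]: virtual address */
}

static inline unsigned long example_tlb_entry_asid(unsigned long as)
{
        return as & 0xff;               /* as[07..00]: ASID */
}
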
#endif  /* __ASSEMBLY__ */
#endif  /* _XTENSA_TLBFLUSH_H */
