root/arch/alpha/include/asm/tlbflush.h

/* [<][>][^][v][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes the following definitions.
  1. ev4_flush_tlb_current
  2. ev5_flush_tlb_current
  3. ev4_flush_tlb_current_page
  4. ev5_flush_tlb_current_page
  5. flush_tlb
  6. flush_tlb_other
  7. flush_tlb_all
  8. flush_tlb_mm
  9. flush_tlb_page
  10. flush_tlb_range
  11. flush_tlb_kernel_range

   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 #ifndef _ALPHA_TLBFLUSH_H
   3 #define _ALPHA_TLBFLUSH_H
   4 
   5 #include <linux/mm.h>
   6 #include <linux/sched.h>
   7 #include <asm/compiler.h>
   8 #include <asm/pgalloc.h>
   9 
  10 #ifndef __EXTERN_INLINE
  11 #define __EXTERN_INLINE extern inline
  12 #define __MMU_EXTERN_INLINE
  13 #endif
  14 
  15 extern void __load_new_mm_context(struct mm_struct *);
  16 
  17 
  18 /* Use a few helper functions to hide the ugly broken ASN
  19    numbers on early Alphas (ev4 and ev45).  */
  20 
/*
 * Flush the whole user TLB of the currently active mm on EV4/EV45.
 *
 * These early Alphas have broken ASN handling (see comment above), so
 * we cannot rely on a new ASN to retire stale entries: reload the mm
 * context, then explicitly invalidate all process (non-ASM) TLB
 * entries with tbiap.  The order matters — the invalidate must follow
 * the context reload.
 */
__EXTERN_INLINE void
ev4_flush_tlb_current(struct mm_struct *mm)
{
        __load_new_mm_context(mm);
        tbiap();
}
  27 
/*
 * Flush the whole user TLB of the currently active mm on EV5+.
 *
 * With working ASNs, loading a new mm context is sufficient: the old
 * translations are superseded and no explicit TLB invalidate is
 * needed.
 */
__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
        __load_new_mm_context(mm);
}
  33 
  34 /* Flush just one page in the current TLB set.  We need to be very
  35    careful about the icache here, there is no way to invalidate a
  36    specific icache page.  */
  37 
  38 __EXTERN_INLINE void
  39 ev4_flush_tlb_current_page(struct mm_struct * mm,
  40                            struct vm_area_struct *vma,
  41                            unsigned long addr)
  42 {
  43         int tbi_flag = 2;
  44         if (vma->vm_flags & VM_EXEC) {
  45                 __load_new_mm_context(mm);
  46                 tbi_flag = 3;
  47         }
  48         tbi(tbi_flag, addr);
  49 }
  50 
  51 __EXTERN_INLINE void
  52 ev5_flush_tlb_current_page(struct mm_struct * mm,
  53                            struct vm_area_struct *vma,
  54                            unsigned long addr)
  55 {
  56         if (vma->vm_flags & VM_EXEC)
  57                 __load_new_mm_context(mm);
  58         else
  59                 tbi(2, addr);
  60 }
  61 
  62 
  63 #ifdef CONFIG_ALPHA_GENERIC
  64 # define flush_tlb_current              alpha_mv.mv_flush_tlb_current
  65 # define flush_tlb_current_page         alpha_mv.mv_flush_tlb_current_page
  66 #else
  67 # ifdef CONFIG_ALPHA_EV4
  68 #  define flush_tlb_current             ev4_flush_tlb_current
  69 #  define flush_tlb_current_page        ev4_flush_tlb_current_page
  70 # else
  71 #  define flush_tlb_current             ev5_flush_tlb_current
  72 #  define flush_tlb_current_page        ev5_flush_tlb_current_page
  73 # endif
  74 #endif
  75 
  76 #ifdef __MMU_EXTERN_INLINE
  77 #undef __EXTERN_INLINE
  78 #undef __MMU_EXTERN_INLINE
  79 #endif
  80 
/* Flush the current user mapping: the whole user TLB of the mm that
   is active on this CPU.  */
static inline void
flush_tlb(void)
{
        flush_tlb_current(current->active_mm);
}
  87 
  88 /* Flush someone else's user mapping.  */
  89 static inline void
  90 flush_tlb_other(struct mm_struct *mm)
  91 {
  92         unsigned long *mmc = &mm->context[smp_processor_id()];
  93         /* Check it's not zero first to avoid cacheline ping pong
  94            when possible.  */
  95         if (*mmc) *mmc = 0;
  96 }
  97 
  98 #ifndef CONFIG_SMP
/* Flush everything — all user and kernel translations (the kernel
   mapping may also have changed due to vmalloc/vfree).  */
static inline void flush_tlb_all(void)
{
        tbia();
}
 105 
 106 /* Flush a specified user mapping.  */
 107 static inline void
 108 flush_tlb_mm(struct mm_struct *mm)
 109 {
 110         if (mm == current->active_mm)
 111                 flush_tlb_current(mm);
 112         else
 113                 flush_tlb_other(mm);
 114 }
 115 
 116 /* Page-granular tlb flush.  */
 117 static inline void
 118 flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 119 {
 120         struct mm_struct *mm = vma->vm_mm;
 121 
 122         if (mm == current->active_mm)
 123                 flush_tlb_current_page(mm, vma, addr);
 124         else
 125                 flush_tlb_other(mm);
 126 }
 127 
/* Flush a specified range of user mapping.  On the Alpha we simply
   flush the whole user tlb for the vma's mm, so start/end are
   intentionally unused.  */
static inline void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                unsigned long end)
{
        flush_tlb_mm(vma->vm_mm);
}
 136 
 137 #else /* CONFIG_SMP */
 138 
 139 extern void flush_tlb_all(void);
 140 extern void flush_tlb_mm(struct mm_struct *);
 141 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
 142 extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
 143                             unsigned long);
 144 
 145 #endif /* CONFIG_SMP */
 146 
/* Flush kernel translations for [start, end).  No finer-grained
   primitive is used here — everything is flushed via flush_tlb_all,
   so start/end are intentionally unused.  */
static inline void flush_tlb_kernel_range(unsigned long start,
                                        unsigned long end)
{
        flush_tlb_all();
}
 152 
 153 #endif /* _ALPHA_TLBFLUSH_H */

/* [<][>][^][v][top][bottom][index][help] */