root/arch/xtensa/include/asm/mmu_context.h

DEFINITIONS

This source file includes the following definitions.
  1. set_rasid_register
  2. get_rasid_register
  3. get_new_mmu_context
  4. get_mmu_context
  5. activate_context
  6. init_new_context
  7. switch_mm
  8. destroy_context
  9. enter_lazy_tlb

/*
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2013 Tensilica Inc.
 */

#ifndef _XTENSA_MMU_CONTEXT_H
#define _XTENSA_MMU_CONTEXT_H

#ifndef CONFIG_MMU
#include <asm/nommu_context.h>
#else

#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/mm_types.h>

#include <asm/vectors.h>

#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
#include <asm-generic/percpu.h>

#if (XCHAL_HAVE_TLBS != 1)
# error "Linux must have an MMU!"
#endif

DECLARE_PER_CPU(unsigned long, asid_cache);
#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)

/*
 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
 * any user or kernel context.  We use the reserved values in the
 * ASID_INSERT macro below.
 *
 * 0 invalid
 * 1 kernel
 * 2 reserved
 * 3 reserved
 * 4...255 available
 */

#define NO_CONTEXT      0
#define ASID_USER_FIRST 4
#define ASID_MASK       ((1 << XCHAL_MMU_ASID_BITS) - 1)
#define ASID_INSERT(x)  (0x03020001 | (((x) & ASID_MASK) << 8))

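/*
 * Worked example (illustrative, assuming the usual
 * XCHAL_MMU_ASID_BITS == 8 configuration): ASID_INSERT() packs one
 * ASID per protection ring into the 32-bit RASID value, ring 0 in the
 * low byte.  For a user ASID of 4:
 *
 *     ASID_INSERT(4) == 0x03020001 | (4 << 8) == 0x03020401
 *
 * i.e. ring 0 (kernel) runs with ASID 1, ring 1 (user) with ASID 4,
 * and rings 2 and 3 carry the reserved values 2 and 3.
 */
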
void init_mmu(void);
void init_kio(void);

/*
 * Write the RASID special register; the isync ensures the new ring
 * ASIDs take effect before any subsequent memory access.
 */
static inline void set_rasid_register (unsigned long val)
{
        __asm__ __volatile__ (" wsr %0, rasid\n\t"
                              " isync\n" : : "a" (val));
}

/* Read back the current value of the RASID special register. */
static inline unsigned long get_rasid_register (void)
{
        unsigned long tmp;
        __asm__ __volatile__ (" rsr %0, rasid\n\t" : "=a" (tmp));
        return tmp;
}

static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
        unsigned long asid = cpu_asid_cache(cpu);
        if ((++asid & ASID_MASK) == 0) {
                /*
                 * Start a new asid cycle; continue counting with the
                 * next incarnation bits, skipping over the reserved
                 * values 0, 1, 2 and 3.
                 */
                local_flush_tlb_all();
                asid += ASID_USER_FIRST;
        }
        cpu_asid_cache(cpu) = asid;
        mm->context.asid[cpu] = asid;
        mm->context.cpu = cpu;
}

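/*
 * Illustration of the allocation above (assuming 8-bit ASIDs):
 * cpu_asid_cache(cpu) counts monotonically; its low byte is the ASID
 * last handed out and the upper bits act as a generation number.  If
 * the cache holds 0x1ff, ++asid gives 0x200: the low byte wrapped to
 * 0, so the whole TLB is flushed and ASID_USER_FIRST is added,
 * yielding 0x204, i.e. generation 2, ASID 4.
 */
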
static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
        /*
         * Check if our ASID is of an older version and thus invalid.
         */

        if (mm) {
                unsigned long asid = mm->context.asid[cpu];

                if (asid == NO_CONTEXT ||
                                ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
                        get_new_mmu_context(mm, cpu);
        }
}

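/*
 * Example of the staleness test above (illustrative values): with
 * mm->context.asid[cpu] == 0x104 and cpu_asid_cache(cpu) == 0x204,
 * (0x104 ^ 0x204) & ~ASID_MASK == 0x300 != 0, so the mm's ASID came
 * from an older generation and get_new_mmu_context() must allocate a
 * fresh one.
 */
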
static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
{
        get_mmu_context(mm, cpu);
        set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
        invalidate_page_directory();
}

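/*
 * Note on the sequence above: activate_context() first makes sure the
 * mm owns a current-generation ASID on this cpu, then programs RASID
 * with it, and finally invalidates the TLB entry caching the page
 * directory so translations are rebuilt from the new mm's page tables.
 */
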
/*
 * Initialize the context related info for a new mm_struct
 * instance.  Valid cpu values are 0..(NR_CPUS-1), so initializing
 * to -1 says the process has never run on any core.
 */

static inline int init_new_context(struct task_struct *tsk,
                struct mm_struct *mm)
{
        int cpu;
        for_each_possible_cpu(cpu) {
                mm->context.asid[cpu] = NO_CONTEXT;
        }
        mm->context.cpu = -1;
        return 0;
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned int cpu = smp_processor_id();
        int migrated = next->context.cpu != cpu;
        /* Flush the icache if we migrated to a new core. */
        if (migrated) {
                __invalidate_icache_all();
                next->context.cpu = cpu;
        }
        if (migrated || prev != next)
                activate_context(next, cpu);
}

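/*
 * Usage sketch (the call sites live in core kernel code, not here):
 * the scheduler invokes switch_mm() on every task switch, roughly as
 *
 *     switch_mm(prev->active_mm, next->mm, next);
 *
 * while the activate_mm() alias below covers the exec path, where no
 * previous user context needs to be distinguished.
 */
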
#define activate_mm(prev, next) switch_mm((prev), (next), NULL)
#define deactivate_mm(tsk, mm)  do { } while (0)

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
        invalidate_page_directory();
}

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
        /* Nothing to do. */
}

#endif /* CONFIG_MMU */
#endif /* _XTENSA_MMU_CONTEXT_H */
