This source file includes the following definitions.
- enter_lazy_tlb
- switch_mm
- arch_dup_mmap
- arch_unmap
- arch_bprm_mm_init
- arch_vma_access_permitted
1
2
3
4
5
6
7
8
9 #ifndef __UNICORE_MMU_CONTEXT_H__
10 #define __UNICORE_MMU_CONTEXT_H__
11
12 #include <linux/compiler.h>
13 #include <linux/sched.h>
14 #include <linux/mm.h>
15 #include <linux/vmacache.h>
16 #include <linux/io.h>
17
18 #include <asm/cacheflush.h>
19 #include <asm/cpu-single.h>
20
/*
 * unicore32 keeps no per-mm MMU-context state: creating a context
 * always succeeds (0) and destroying one is a no-op.
 */
#define init_new_context(tsk, mm) 0

#define destroy_context(mm) do { } while (0)
24
25
26
27
28
29
30
31
32
33
/*
 * enter_lazy_tlb - hook invoked when the kernel runs on behalf of @mm
 * without doing a full address-space switch.  No lazy-TLB bookkeeping
 * is needed on this architecture, so this is intentionally empty.
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
38
39
40
41
42
43
44
45 static inline void
46 switch_mm(struct mm_struct *prev, struct mm_struct *next,
47 struct task_struct *tsk)
48 {
49 unsigned int cpu = smp_processor_id();
50
51 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
52 cpu_switch_mm(next->pgd, next);
53 }
54
/* Nothing to do when an mm is deactivated on this architecture. */
#define deactivate_mm(tsk, mm) do { } while (0)
/* Activating an mm is just a plain switch; no task context needed. */
#define activate_mm(prev, next) switch_mm(prev, next, NULL)
57
58
59
60
61
62
63
64
/*
 * arch_exit_mmap - detach the vma covering 0xffff0000 (NOTE(review):
 * presumably the arch-reserved vectors page — confirm) from @mm before
 * the generic exit path tears the mapping down, so that page is not
 * freed like an ordinary user mapping.
 *
 * The vma is expected to be the last one in the list (BUG_ON on a
 * successor); it is unlinked from the vma list and the rb-tree, the
 * per-mm vma cache is invalidated, the map count is adjusted, and the
 * vma itself is released via remove_vma().  Kept as a macro and left
 * byte-for-byte intact: the unlink sequence is order-sensitive.
 */
#define arch_exit_mmap(mm) \
do { \
	struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
	if (high_vma) { \
		BUG_ON(high_vma->vm_next); \
		if (high_vma->vm_prev) \
			high_vma->vm_prev->vm_next = NULL; \
		else \
			mm->mmap = NULL; \
		rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
		vmacache_invalidate(mm); \
		mm->map_count--; \
		remove_vma(high_vma); \
	} \
} while (0)
80
/*
 * arch_dup_mmap - hook called when an mm is duplicated (fork).  No
 * architecture-private state to copy; always reports success.
 */
static inline int arch_dup_mmap(struct mm_struct *oldmm,
				struct mm_struct *mm)
{
	return 0;
}
86
/*
 * arch_unmap - hook called when [start, end) is unmapped from @mm.
 * Nothing to do on this architecture.
 */
static inline void arch_unmap(struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
}
91
/*
 * arch_bprm_mm_init - hook called while setting up a new mm during
 * exec.  No architecture-specific initialisation is required here.
 */
static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}
96
/*
 * arch_vma_access_permitted - per-vma access check hook.  This
 * architecture implements no extra protection-key style restrictions,
 * so every access (read, write, execute, foreign) is permitted.
 */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	return true;
}
103 #endif