This source file includes the following definitions:
- init_new_context
- __new_context
- check_context
- enter_lazy_tlb
- switch_mm

#ifndef __ASM_NDS32_MMU_CONTEXT_H
#define __ASM_NDS32_MMU_CONTEXT_H

#include <linux/spinlock.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>

/*
 * Start a new mm with context id 0; check_context() will hand out a
 * real CID (via __new_context()) the first time this mm is switched in.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.id = 0;
	return 0;
}

#define destroy_context(mm)	do { } while (0)

/*
 * Number of bits in the hardware CID field; the bits above it in
 * context.id act as a generation counter.
 */
#define CID_BITS 9
extern spinlock_t cid_lock;
extern unsigned int cpu_last_cid;

/*
 * Allocate the next context ID.  cpu_last_cid keeps the hardware CID in
 * the TLB_MISC_mskCID field and a generation count in the bits above it.
 */
static inline void __new_context(struct mm_struct *mm)
{
	unsigned int cid;
	unsigned long flags;

	spin_lock_irqsave(&cid_lock, flags);
	cid = cpu_last_cid;
	cpu_last_cid += 1 << TLB_MISC_offCID;
	/* On 32-bit overflow, restart at generation 1 rather than 0. */
	if (cpu_last_cid == 0)
		cpu_last_cid = 1 << TLB_MISC_offCID << CID_BITS;

	/*
	 * The hardware CID field has just wrapped around, so old CIDs may
	 * be reused; flush everything to drop stale TLB entries.
	 */
	if ((cid & TLB_MISC_mskCID) == 0)
		flush_tlb_all();
	spin_unlock_irqrestore(&cid_lock, flags);

	mm->context.id = cid;
}

/*
 * If this mm's context id comes from an older generation than
 * cpu_last_cid, its CID may have been reused: allocate a fresh one.
 */
static inline void check_context(struct mm_struct *mm)
{
	if (unlikely
	    ((mm->context.id ^ cpu_last_cid) >> TLB_MISC_offCID >> CID_BITS))
		__new_context(mm);
}

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do. */
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * Switch unless this CPU was already marked in next's cpumask and
	 * prev == next: check next's CID generation, then program the MMU.
	 */
	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
		check_context(next);
		cpu_switch_mm(next);
	}
}

#define deactivate_mm(tsk,mm)	do { } while (0)
#define activate_mm(prev,next)	switch_mm(prev, next, NULL)

#endif
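
For reference, below is a small stand-alone C program that models the generation-based CID scheme implemented by __new_context() and check_context(). The field offset (OFF_CID), field width (CID_BITS_DEMO) and the starting counter value are assumed demo values, not the real nds32 TLB_MISC_offCID / TLB_MISC_mskCID definitions; the sketch only illustrates how the counter, the generation bits and the flush-on-wrap interact.

/*
 * Stand-alone model of the CID allocator above.  OFF_CID, CID_BITS_DEMO
 * and the initial counter value are assumed demo values, not the real
 * nds32 TLB_MISC definitions.
 */
#include <stdio.h>

#define OFF_CID       0	/* assumed bit offset of the CID field */
#define CID_BITS_DEMO 9	/* assumed width of the CID field */
#define MSK_CID       (((1u << CID_BITS_DEMO) - 1) << OFF_CID)

/* Start just below a field wrap so the rollover path is visible. */
static unsigned int cpu_last_cid = 0x3feu;

/* Mirrors __new_context(): return the current counter, advance it,
 * restart on 32-bit overflow, and "flush" when the CID field wraps. */
static unsigned int new_context(void)
{
	unsigned int cid = cpu_last_cid;

	cpu_last_cid += 1u << OFF_CID;
	if (cpu_last_cid == 0)
		cpu_last_cid = 1u << OFF_CID << CID_BITS_DEMO;

	if ((cid & MSK_CID) == 0)
		printf("  CID field wrapped -> flush_tlb_all()\n");

	return cid;
}

/* Mirrors check_context(): true when 'id' was handed out in an older
 * generation than the one cpu_last_cid is currently in. */
static int context_is_stale(unsigned int id)
{
	return ((id ^ cpu_last_cid) >> OFF_CID >> CID_BITS_DEMO) != 0;
}

int main(void)
{
	unsigned int first = 0;
	int i;

	for (i = 0; i < 4; i++) {
		unsigned int cid = new_context();

		if (i == 0)
			first = cid;
		printf("cid=%#x  field=%#x  generation=%u\n",
		       cid, cid & MSK_CID, cid >> OFF_CID >> CID_BITS_DEMO);
	}

	printf("first cid %#x is %s\n",
	       first, context_is_stale(first) ? "stale" : "still valid");
	return 0;
}

Running it prints the last two CIDs of one generation, the flush when the field wraps into the next generation, and then reports the first CID as stale, which is the situation check_context() guards against on each switch_mm().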