/*
 *  S390 version
 *
 *  Derived from "include/asm-i386/mmu_context.h"
 */

#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>

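/*
 * Initialize the architecture-specific part of a new mm: page table
 * and gmap lists, CPU attach bookkeeping and the initial ASCE. A
 * context created by exec (asce_limit == 0) starts with a 4TB
 * region-third table.
 */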
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	spin_lock_init(&mm->context.list_lock);
	INIT_LIST_HEAD(&mm->context.pgtable_list);
	INIT_LIST_HEAD(&mm->context.gmap_list);
	cpumask_clear(&mm->context.cpu_attach_mask);
	atomic_set(&mm->context.attach_count, 0);
	mm->context.flush_mm = 0;
#ifdef CONFIG_PGSTE
	mm->context.alloc_pgste = page_table_allocate_pgste;
	mm->context.has_pgste = 0;
	mm->context.use_skey = 0;
#endif
	if (mm->context.asce_limit == 0) {
		/* context created by exec, set asce limit to 4TB */
		mm->context.asce_bits = _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION3;
		mm->context.asce_limit = STACK_TOP_MAX;
	} else if (mm->context.asce_limit == (1UL << 31)) {
		mm_inc_nr_pmds(mm);
	}
	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
	return 0;
}

#define destroy_context(mm)             do { } while (0)

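/*
 * Publish the user ASCE of @mm in the lowcore. If the task currently
 * runs with the user segment (secondary space) selected, load the new
 * ASCE into control register 7 right away; CIF_ASCE flags that the
 * lowcore ASCE still needs to be reloaded before returning to user
 * space.
 */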
static inline void set_user_asce(struct mm_struct *mm)
{
	S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
	if (current->thread.mm_segment.ar4)
		__ctl_load(S390_lowcore.user_asce, 7, 7);
	set_cpu_flag(CIF_ASCE);
}

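/*
 * Detach the user address space: point both the primary (CR1) and
 * secondary (CR7) address space control registers at the kernel ASCE.
 */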
static inline void clear_user_asce(void)
{
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;

	__ctl_load(S390_lowcore.user_asce, 1, 1);
	__ctl_load(S390_lowcore.user_asce, 7, 7);
}

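/*
 * Make sure CR1 contains the kernel ASCE and flag CIF_ASCE so that
 * the user ASCE gets reloaded before returning to user space.
 */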
static inline void load_kernel_asce(void)
{
	unsigned long asce;

	__ctl_store(asce, 1, 1);
	if (asce != S390_lowcore.kernel_asce)
		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	set_cpu_flag(CIF_ASCE);
}

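/*
 * Switch the address space: publish the new mm's ASCE in the lowcore,
 * run with the kernel ASCE in CR1/CR7 until the switch is finished,
 * and move the attach count and CPU attach mask from prev to next.
 */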
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
	if (prev == next)
		return;
	if (MACHINE_HAS_TLB_LC)
		cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
	/* Clear old ASCE by loading the kernel ASCE. */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	atomic_inc(&next->context.attach_count);
	atomic_dec(&prev->context.attach_count);
	if (MACHINE_HAS_TLB_LC)
		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}

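/*
 * Finish the mm switch after the scheduler locks have been dropped:
 * wait for concurrent TLB flush operations (upper half of
 * attach_count) to complete, attach this CPU to the mm's cpumask,
 * perform a pending mm flush and restore the address space mode.
 */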
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	load_kernel_asce();
	if (mm) {
		preempt_disable();
		while (atomic_read(&mm->context.attach_count) >> 16)
			cpu_relax();

		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		if (mm->context.flush_mm)
			__tlb_flush_mm(mm);
		preempt_enable();
	}
	set_fs(current->thread.mm_segment);
}

#define enter_lazy_tlb(mm,tsk)	do { } while (0)
#define deactivate_mm(tsk,mm)	do { } while (0)

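/*
 * Activate a new mm on exec: switch to it, mark this CPU in its
 * cpumask and install the user ASCE.
 */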
static inline void activate_mm(struct mm_struct *prev,
                               struct mm_struct *next)
{
	switch_mm(prev, next, current);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
	set_user_asce(next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

static inline void arch_unmap(struct mm_struct *mm,
			struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

#endif /* __S390_MMU_CONTEXT_H */