/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *  Copyright (C) 2012 ARM Limited
 *
 *  Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big-endian operation, the two 32-bit words are swapped if accessed
 * by non-64-bit operations.
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION
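
/*
 * For illustration only (nothing below relies on this snippet): with
 * ASID_BITS == 8, as assumed here for ARMv7 (ASID_BITS and ASID_MASK
 * are defined in asm/mmu_context.h), a context ID such as 0x342 splits
 * as:
 *
 *	generation = id & ASID_MASK;	(0x300: rollover count)
 *	asid       = id & ~ASID_MASK;	(0x042: hardware ASID)
 *
 * ASID_FIRST_VERSION is then 0x100, so each rollover advances the
 * generation in the upper bits while the low 8 bits index asid_map.
 */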

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask)
{
	int cpu;
	unsigned long flags;
	u64 context_id, asid;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	context_id = mm->context.id.counter;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We only need to send an IPI if the other CPUs are
		 * running the same ASID as the one being invalidated.
		 */
		asid = per_cpu(active_asids, cpu).counter;
		if (asid == 0)
			asid = per_cpu(reserved_asids, cpu);
		if (context_id == asid)
			cpumask_set_cpu(cpu, mask);
	}
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif

#ifdef CONFIG_ARM_LPAE
/*
 * With LPAE, the ASID and page tables are updated atomically, so there is
 * no need for a reserved set of tables (the active ASID tracking prevents
 * any issues across a rollover).
 */
#define cpu_set_reserved_ttbr0()
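/*
 * A sketch of why the update is atomic, assuming the ARMv7
 * long-descriptor layout in which the 64-bit TTBR0 carries the ASID in
 * bits 55:48: cpu_switch_mm() can install the new pgd and the new ASID
 * with a single 64-bit register write, roughly
 *
 *	ttbr0 = virt_to_phys(pgd) | (id & ~ASID_MASK) << 48;
 *
 * (performed via mcrr in assembly), so there is no window in which the
 * old ASID is paired with the new page tables or vice versa.
 */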
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/*
	 * Copy TTBR1 into TTBR0.
	 * This points at swapper_pg_dir, which contains only global
	 * entries so any speculative walks are perfectly safe.
	 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task) << ASID_BITS;
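
	/*
	 * The sequence below is equivalent to
	 *
	 *	contextidr = (contextidr & ~ASID_MASK) | pid;
	 *
	 * i.e. preserve the live ASID in CONTEXTIDR[7:0] and install the
	 * PID of the incoming task in CONTEXTIDR[31:8] for debuggers and
	 * trace logic to read.
	 */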
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif

static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
		asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid & ~ASID_MASK, asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	cpumask_setall(&tlb_flush_pending);

	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}
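
/*
 * A worked example of a rollover, assuming 8-bit ASIDs: suppose the
 * generation is 0x100 and all 256 ASIDs are in use. new_context() bumps
 * the generation to 0x200 and calls flush_context(), which clears
 * asid_map, re-marks the ASID each CPU is currently running (so those
 * tasks keep their hardware ASIDs across the rollover) and sets every
 * bit in tlb_flush_pending. Each CPU then flushes its TLB before
 * running a task tagged with the new generation, so translations left
 * over from recycled ASIDs can never be hit again.
 */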

static int is_reserved_asid(u64 asid)
{
	int cpu;
	for_each_possible_cpu(cpu)
		if (per_cpu(reserved_asids, cpu) == asid)
			return 1;
	return 0;
}

static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (is_reserved_asid(asid))
			return generation | (asid & ~ASID_MASK);

		/*
		 * We had a valid ASID in a previous life, so try to reuse
		 * it if possible.
		 */
		asid &= ~ASID_MASK;
		if (!__test_and_set_bit(asid, asid_map))
			goto bump_gen;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.
	 * We always count from ASID #1, as we reserve ASID #0 to switch
	 * via TTBR0 and to avoid speculative page table walks from hitting
	 * in any partial walk caches, which could be populated from
	 * overlapping level-1 descriptors used to map both the module
	 * area and the userspace stack.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid == NUM_USER_ASIDS) {
		generation = atomic64_add_return(ASID_FIRST_VERSION,
						 &asid_generation);
		flush_context(cpu);
		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
	}

	__set_bit(asid, asid_map);
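
	/*
	 * Remember where the search stopped so that the next allocation
	 * resumes from cur_idx rather than rescanning the densely
	 * populated low end of the bitmap.
	 */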
	cur_idx = asid;

bump_gen:
	asid |= generation;
	cpumask_clear(mm_cpumask(mm));
	return asid;
}

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();
	u64 asid;

	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	/*
	 * We cannot update the pgd and the ASID atomically with classic
	 * MMU, so switch exclusively to global mappings to avoid
	 * speculative page table walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

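	/*
	 * The xor/shift test below is zero only when mm's context ID
	 * belongs to the current generation: for example, with 8-bit
	 * ASIDs, an ID of 0x342 checked against generation 0x300 gives
	 * (0x342 ^ 0x300) >> 8 == 0, while a stale ID of 0x1a7 gives a
	 * non-zero result and falls through to the slow path. The xchg
	 * also sends us down the slow path if a concurrent rollover has
	 * just cleared this CPU's active_asids to zero, so we never keep
	 * running on a stale ASID without flushing.
	 */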
	asid = atomic64_read(&mm->context.id);
	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
		local_flush_bp_all();
		local_flush_tlb_all();
	}

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}