/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU is not using the hash
 * table, such as 8xx, 4xx, BookE, etc.
 *
 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                IBM Corp.
 *
 *  Derived from previous arch/powerpc/mm/mmu_context.c
 *  and arch/powerpc/include/asm/mmu_context.h
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 * TODO:
 *
 *   - The global context lock will not scale very well
 *   - The maps should be dynamically allocated to allow for processors
 *     that support more PID bits at runtime
 *   - Implement flush_tlb_mm() by making the context stale and picking
 *     a new one
 *   - More aggressively clear stale map bits and maybe find some way to
 *     also clear mm->cpu_vm_mask bits when processes are migrated
 */

//#define DEBUG_MAP_CONSISTENCY
//#define DEBUG_CLAMP_LAST_CONTEXT   31
//#define DEBUG_HARDER

/* We don't use DEBUG because it tends to always be compiled in
 * nowadays, and it would generate far too much output
 */
#ifdef DEBUG_HARDER
#define pr_hard(args...)	printk(KERN_DEBUG args)
#define pr_hardcont(args...)	printk(KERN_CONT args)
#else
#define pr_hard(args...)	do { } while (0)
#define pr_hardcont(args...)	do { } while (0)
#endif

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "mmu_decl.h"

static unsigned int first_context, last_context;
static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
static DEFINE_RAW_SPINLOCK(context_lock);
static bool no_selective_tlbil;

#define CTX_MAP_SIZE	\
	(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
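
/*
 * Worked example (illustrative only): with 64-bit longs, the 47x setup
 * below (last_context = 65535) gives CTX_MAP_SIZE =
 * 8 * (65535 / 64 + 1) = 8192 bytes, while the 8xx (last_context = 15)
 * needs a single long, i.e. 8 bytes.
 */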


/* Steal a context from a task that has one at the moment.
 *
 * This is used when we are running out of available PID numbers
 * on the processors.
 *
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 *
 * For context stealing, we use a slightly different approach for
 * SMP and UP. Basically, the UP one is simpler and doesn't use
 * the stale map as we can just flush the local CPU
 *  -- benh
 */
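
/*
 * A worked example of the stale map protocol (illustrative only, the
 * CPU numbers and ids are made up):
 *
 *   1. CPU0 runs out of free ids and steals id 3 from mm A, which last
 *      ran on CPU1. CPU0 cannot flush CPU1's TLB directly, so it sets
 *      bit 3 in stale_map[1] instead.
 *   2. When CPU1 later switches to whatever mm owns id 3 by then, it
 *      finds the bit set in its stale map, calls local_flush_tlb_mm()
 *      and clears the bit before using the id.
 */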
#ifdef CONFIG_SMP
static unsigned int steal_context_smp(unsigned int id)
{
	struct mm_struct *mm;
	unsigned int cpu, max, i;

	max = last_context - first_context;

	/* Attempt to free next_context first and then loop until we manage */
	while (max--) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		/* We have a candidate victim, check if it's active: on SMP
		 * we cannot steal active contexts
		 */
		if (mm->context.active) {
			id++;
			if (id > last_context)
				id = first_context;
			continue;
		}
		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;

		/* Mark it stale on all CPUs that used this mm. For threaded
		 * implementations, we set it on all threads of each core
		 * represented in the mask. A future implementation will use
		 * a core map instead but this will do for now.
		 */
		for_each_cpu(cpu, mm_cpumask(mm)) {
			for (i = cpu_first_thread_sibling(cpu);
			     i <= cpu_last_thread_sibling(cpu); i++) {
				if (stale_map[i])
					__set_bit(id, stale_map[i]);
			}
			/* Skip to the last sibling so that for_each_cpu()
			 * moves on to the next core on the next iteration
			 */
			cpu = i - 1;
		}
		return id;
	}

	/* This will happen if you have more CPUs than available contexts,
	 * all we can do here is wait a bit and try again
	 */
	raw_spin_unlock(&context_lock);
	cpu_relax();
	raw_spin_lock(&context_lock);

	/* This will cause the caller to try again */
	return MMU_NO_CONTEXT;
}
#endif  /* CONFIG_SMP */

static unsigned int steal_all_contexts(void)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();
	unsigned int id;

	for (id = first_context; id <= last_context; id++) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;
		if (id != first_context) {
			context_mm[id] = NULL;
			__clear_bit(id, context_map);
#ifdef DEBUG_MAP_CONSISTENCY
			mm->context.active = 0;
#endif
		}
		__clear_bit(id, stale_map[cpu]);
	}

	/* Flush the TLB for all contexts (not to be used on SMP) */
	_tlbil_all();

	nr_free_contexts = last_context - first_context;

	return first_context;
}

/* Note that this will also be called on SMP if all other CPUs are
 * offlined, which means that it may be called for cpu != 0. For
 * this to work, we somewhat assume that CPUs that are onlined
 * come up with a fully clean TLB (or are cleaned when offlined)
 */
static unsigned int steal_context_up(unsigned int id)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();

	/* Pick up the victim mm */
	mm = context_mm[id];

	pr_hardcont(" | steal %d from 0x%p", id, mm);

	/* Flush the TLB for that context */
	local_flush_tlb_mm(mm);

	/* Mark this mm as having no context anymore */
	mm->context.id = MMU_NO_CONTEXT;

	/* XXX This clear should ultimately be part of local_flush_tlb_mm */
	__clear_bit(id, stale_map[cpu]);

	return id;
}

#ifdef DEBUG_MAP_CONSISTENCY
static void context_check_map(void)
{
	unsigned int id, nrf, nact;

	nrf = nact = 0;
	for (id = first_context; id <= last_context; id++) {
		int used = test_bit(id, context_map);
		if (!used)
			nrf++;
		if (used != (context_mm[id] != NULL))
			pr_err("MMU: Context %d is %s and MM is %p !\n",
			       id, used ? "used" : "free", context_mm[id]);
		if (context_mm[id] != NULL)
			nact += context_mm[id]->context.active;
	}
	if (nrf != nr_free_contexts) {
		pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
		       nr_free_contexts, nrf);
		nr_free_contexts = nrf;
	}
	if (nact > num_online_cpus())
		pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
		       nact, num_online_cpus());
	if (first_context > 0 && !test_bit(0, context_map))
		pr_err("MMU: Context 0 has been freed !!!\n");
}
#else
static void context_check_map(void) { }
#endif

void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned int i, id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path ... yet */
	raw_spin_lock(&context_lock);

	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore */
	next->context.active++;
	if (prev) {
		pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
		if (context_mm[id] != next)
			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
			       next, id, id, context_mm[id]);
#endif
		goto ctxt_ok;
	}

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		if (no_selective_tlbil)
			id = steal_all_contexts();
		else
			id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context + 1, id);
		if (id > last_context)
			id = first_context;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

	context_check_map();
 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (test_bit(id, stale_map[cpu])) {
		pr_hardcont(" | stale flush %d [%d..%d]",
			    id, cpu_first_thread_sibling(cpu),
			    cpu_last_thread_sibling(cpu));

		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		for (i = cpu_first_thread_sibling(cpu);
		     i <= cpu_last_thread_sibling(cpu); i++) {
			if (stale_map[i])
				__clear_bit(id, stale_map[i]);
		}
	}

	/* Flick the MMU and release lock */
	pr_hardcont(" -> %d\n", id);
	set_context(id, next->pgd);
	raw_spin_unlock(&context_lock);
}
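
#if 0
/*
 * Illustrative sketch only (not built): a minimal caller of the
 * allocator above, assuming the usual switch_mm() glue found in
 * asm/mmu_context.h; the real entry point may differ.
 * example_switch_mm() is a hypothetical name.
 */
static void example_switch_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/* Remember that this CPU ran "next" so the stale map logic and
	 * TLB flushes above can find it later
	 */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

	/* Pick (or steal) a context id and program it into the MMU */
	switch_mmu_context(prev, next);
}
#endif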

/*
 * Set up the context for a new address space.
 */
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	pr_hard("initing context for mm @%p\n", mm);

	mm->context.id = MMU_NO_CONTEXT;
	mm->context.active = 0;

#ifdef CONFIG_PPC_MM_SLICES
	if (slice_mm_new_context(mm))
		slice_set_user_psize(mm, mmu_virtual_psize);
#endif

	return 0;
}

/*
 * We're finished using the context for an address space.
 */
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int id;

	if (mm->context.id == MMU_NO_CONTEXT)
		return;

	WARN_ON(mm->context.active != 0);

	raw_spin_lock_irqsave(&context_lock, flags);
	id = mm->context.id;
	if (id != MMU_NO_CONTEXT) {
		__clear_bit(id, context_map);
		mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
		mm->context.active = 0;
#endif
		context_mm[id] = NULL;
		nr_free_contexts++;
	}
	raw_spin_unlock_irqrestore(&context_lock, flags);
}
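
#if 0
/*
 * Illustrative sketch only (not built): the life cycle of a context id
 * as seen by this allocator. example_context_lifecycle() is a
 * hypothetical name; the three calls into this file are the real API.
 */
static void example_context_lifecycle(struct task_struct *tsk,
				      struct mm_struct *mm)
{
	/* At fork time the new mm starts out with no context id */
	init_new_context(tsk, mm);	/* context.id = MMU_NO_CONTEXT */

	/* The first time the task is scheduled in, an id is allocated
	 * (or stolen) and programmed into the MMU via set_context()
	 */
	switch_mmu_context(NULL, mm);

	/* On exit, the id is returned to the free pool */
	destroy_context(mm);
}
#endif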

#ifdef CONFIG_SMP

static int mmu_context_cpu_notify(struct notifier_block *self,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;

	/* We don't touch the CPU 0 map, it's allocated at boot and kept
	 * around forever
	 */
	if (cpu == boot_cpuid)
		return NOTIFY_OK;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
		stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
		kfree(stale_map[cpu]);
		stale_map[cpu] = NULL;

		/* We also clear the cpu_vm_mask bits of CPUs going away */
		clear_tasks_mm_cpumask(cpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block mmu_context_cpu_nb = {
	.notifier_call	= mmu_context_cpu_notify,
};

#endif /* CONFIG_SMP */

/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/* Mark init_mm as being active on all possible CPUs since
	 * we'll get called with prev == init_mm the first time
	 * we schedule on a given CPU
	 */
	init_mm.context.active = NR_CPUS;

	/*
	 *   The MPC8xx has only 16 contexts.  We rotate through them on each
	 * task switch.  A better way would be to keep track of tasks that
	 * own contexts, and implement an LRU usage.  That way very active
	 * tasks don't always have to pay the TLB reload overhead.  The
	 * kernel pages are mapped shared, so the kernel can run on behalf
	 * of any task that makes a kernel entry.  Shared does not mean they
	 * are not protected, just that the ASID comparison is not performed.
	 *      -- Dan
	 *
	 * The IBM4xx has 256 contexts, so we can just rotate through these
	 * as a way of "switching" contexts.  If the TID of the TLB is zero,
	 * the PID/TID comparison is disabled, so we can use a TID of zero
	 * to represent all kernel pages as shared among all contexts.
	 *      -- Dan
	 *
	 * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We
	 * should normally never have to steal though the facility is
	 * present if needed.
	 *      -- BenH
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
		first_context = 0;
		last_context = 15;
		no_selective_tlbil = true;
	} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
		first_context = 1;
		last_context = 65535;
		no_selective_tlbil = false;
	} else {
		first_context = 1;
		last_context = 255;
		no_selective_tlbil = false;
	}

#ifdef DEBUG_CLAMP_LAST_CONTEXT
	last_context = DEBUG_CLAMP_LAST_CONTEXT;
#endif
	/*
	 * Allocate the maps used by context management
	 */
	context_map = memblock_virt_alloc(CTX_MAP_SIZE, 0);
	context_mm = memblock_virt_alloc(sizeof(void *) * (last_context + 1), 0);
#ifndef CONFIG_SMP
	stale_map[0] = memblock_virt_alloc(CTX_MAP_SIZE, 0);
#else
	stale_map[boot_cpuid] = memblock_virt_alloc(CTX_MAP_SIZE, 0);

	register_cpu_notifier(&mmu_context_cpu_nb);
#endif

	printk(KERN_INFO
	       "MMU: Allocated %zu bytes of context maps for %d contexts\n",
	       2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
	       last_context - first_context + 1);

	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes first_context < 32: e.g. first_context == 1
	 * reserves only context 0 for the kernel, while first_context == 0
	 * reserves nothing.
	 */
	context_map[0] = (1 << first_context) - 1;
	next_context = first_context;
	nr_free_contexts = last_context - first_context + 1;
}
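
/*
 * Worked example (illustrative only): on a 4xx-class CPU the defaults
 * above give first_context = 1 and last_context = 255, so with 64-bit
 * longs CTX_MAP_SIZE = 8 * (255 / 64 + 1) = 32 bytes, context_map[0]
 * is set to 0x1 (context 0 reserved for the kernel) and
 * nr_free_contexts starts at 255.
 */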