/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/*
 * The Loongson-2/3 cores have a 4-entry ITLB which is a subset of the DTLB;
 * unfortunately, the ITLB is not completely transparent to software.
 */
static inline void flush_itlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2:
	case CPU_LOONGSON3:
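		/* Writing bit 2 of the CP0 Diag register invalidates the ITLB on these cores */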
		write_c0_diag(4);
		break;
	default:
		break;
	}
}

static inline void flush_itlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_itlb();
}

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	if (cpu_has_tlbinv) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
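		/*
		 * The FTLB cannot be invalidated in one shot; indices at or
		 * above tlbsizevtlb select FTLB sets, so invalidate each
		 * set individually.
		 */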
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
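		/*
		 * No tlbinv support: overwrite every non-wired entry with a
		 * unique, non-matching EntryHi and zeroed EntryLo values.
		 */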
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	htw_start();
	flush_itlb();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

/* All entries common to an mm share an ASID.  To effectively flush
   these entries, we just bump the ASID. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
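		/*
		 * Each TLB entry maps an even/odd pair of pages, so align
		 * the range to a double-page boundary and walk it in
		 * double-page strides.
		 */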
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
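		/*
		 * For ranges larger than half the TLB (an eighth when an
		 * FTLB is present), flushing page by page is likely slower
		 * than simply bumping the ASID.
		 */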
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			htw_stop();
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
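				/* A negative index means the probe missed; nothing to invalidate. */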
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
			htw_start();
		} else {
			drop_mmu_context(mm, cpu);
		}
		flush_itlb();
		local_irq_restore(flags);
	}
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		htw_stop();

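		/*
		 * Kernel mappings have the global bit set, so the probe
		 * matches regardless of the ASID bits in EntryHi.
		 */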
		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
		htw_start();
	} else {
		local_flush_tlb_all();
	}
	flush_itlb();
	local_irq_restore(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
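		/* Align to the even page of the pair mapped by one TLB entry. */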
		local_irq_save(flags);
		oldpid = read_c0_entryhi();
		htw_stop();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		htw_start();
		flush_itlb_vm(vma);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	htw_stop();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	htw_start();
	flush_itlb();
	local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(): one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	htw_stop();
	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
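	/*
	 * Note: the page-table walk below is interleaved with the TLB
	 * probe, presumably to do useful work while the probe resolves.
	 */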
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page  */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
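		/*
		 * EntryLo holds the PFN starting at bit 6 (i.e. PA >> 6),
		 * so adding HPAGE_SIZE >> 7 makes EntryLo1 map the upper
		 * half of the huge page.
		 */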
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
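		/*
		 * With XPA the extra physical-address bits (PFNX) live in
		 * pte_low and are loaded into the upper half of EntryLo
		 * via the writex (MTHC0) accessors.
		 */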
		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
		writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
		ptep++;
		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
		writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
#else
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#endif
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	htw_start();
	flush_itlb_vm(vma);
	local_irq_restore(flags);
}

void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
#ifdef CONFIG_XPA
	panic("Broken for XPA kernels");
#else
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
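	/*
	 * Bump the wired count first so that the entry written at the old
	 * index can never be chosen for random replacement.
	 */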
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	htw_start();
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
#endif
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

int __init has_transparent_hugepage(void)
{
	unsigned int mask;
	unsigned long flags;

	local_irq_save(flags);
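	/*
	 * Write the huge-page mask and read it back: if the CPU supports
	 * that page size, the value sticks.
	 */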
	write_c0_pagemask(PM_HUGE_MASK);
	back_to_back_c0_hazard();
	mask = read_c0_pagemask();
	write_c0_pagemask(PM_DEFAULT_MASK);

	local_irq_restore(flags);

	return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */

int temp_tlb_entry;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	htw_stop();
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
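	/*
	 * Temporary entries are handed out from the top of the TLB
	 * downwards; running into the wired region means there is no
	 * space left.
	 */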
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
	htw_start();
out:
	local_irq_restore(flags);
	return ret;
}

static int ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);

/*
 * Configure TLB (for init or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000 ||
	    current_cpu_type() == CPU_R16000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large physical
		 * address.
		 */
#ifdef CONFIG_64BIT
		set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
#else
		set_c0_pagegrain(PG_RIE | PG_XIE);
#endif
	}

	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead. */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS? */
}

void tlb_init(void)
{
	r4k_tlb_configure();

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
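			/*
			 * Wiring the first (tlbsize - ntlb) entries leaves
			 * only ntlb entries available for random
			 * replacement.
			 */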
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}

static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
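		/*
		 * TLB state is lost across a power-down, so reconfigure it
		 * when the CPU resumes (or when a PM entry attempt fails).
		 */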
		r4k_tlb_configure();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
	.notifier_call = r4k_tlb_pm_notifier,
};

static int __init r4k_tlb_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);