/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 *  Derived from arch/ppc64/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

#include <trace/events/thp.h>

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

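/*
 * For reference, the per-CPU batch declared above looks roughly like the
 * sketch below. The real definition lives in asm/tlbflush.h; treat the
 * exact field layout here as illustrative, since it can vary between
 * kernel versions:
 *
 *	struct ppc64_tlb_batch {
 *		int			active;	// set while in lazy MMU mode
 *		unsigned long		index;	// number of entries queued
 *		struct mm_struct	*mm;	// all entries share one mm
 *		real_pte_t		pte[PPC64_TLB_BATCH_NR];
 *		unsigned long		vpn[PPC64_TLB_BATCH_NR];
 *		unsigned int		psize;	// one page size per batch
 *		int			ssize;	// one segment size per batch
 *	};
 */
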
/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	unsigned long vpn;
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in a 4K environment,
	 * such as for SPEs, we obtain the page size from the slice,
	 * which must therefore still exist (and thus the VMA must not
	 * have been reused) at the time of this call.
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);
		/* Mask the address for the correct page size */
		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shut up gcc */
#endif
	} else {
		psize = pte_pagesize_index(mm, addr, pte);
		/* Mask the address for the standard page size.  If we
		 * have a 64k page kernel, but the hardware does not
		 * support 64k pages, this might be different from the
		 * hardware page size encoded in the slice table. */
		addr &= PAGE_MASK;
	}

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	WARN_ON(vsid == 0);
	vpn = hpt_vpn(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case; it might be worth testing the mm cpumask,
	 * though, and deciding to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vpn, rpte, psize, ssize, 0);
		put_cpu_var(ppc64_tlb_batch);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (e.g. copy_page_range() when it
	 * tries to allocate a new pte). If we have to reclaim memory and
	 * end up scanning and resetting referenced bits then our batch
	 * context will change mid-stream.
	 *
	 * We also need to ensure only one page size is present in a
	 * given batch.
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
	batch->pte[i] = rpte;
	batch->vpn[i] = vpn;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}
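
/*
 * Illustrative sketch of a typical caller: the low-level PTE update path
 * checks whether the old PTE was ever inserted in the hash table and, if
 * so, calls hpte_need_flush(). This is a simplified sketch, not the exact
 * pte_update() from asm/pgtable-ppc64.h (whose signature varies between
 * kernel versions):
 *
 *	static inline unsigned long pte_update(struct mm_struct *mm,
 *					       unsigned long addr,
 *					       pte_t *ptep, unsigned long clr,
 *					       int huge)
 *	{
 *		unsigned long old;
 *
 *		old = pte_val(*ptep);		// an ldarx/stdcx. loop
 *		*ptep = __pte(old & ~clr);	// in the real code
 *		if (old & _PAGE_HASHPTE)
 *			hpte_need_flush(mm, addr, ptep, old, huge);
 *		return old;
 *	}
 */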

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	const struct cpumask *tmp;
	int i, local = 0;

	i = batch->index;
	tmp = cpumask_of(smp_processor_id());
	if (cpumask_equal(mm_cpumask(batch->mm), tmp))
		local = 1;
	if (i == 1)
		flush_hash_page(batch->vpn[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}
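
/*
 * A batch only becomes "active" while a CPU is in lazy MMU mode. On ppc64
 * the enter/leave hooks look roughly like the sketch below (the real
 * definitions live in asm/tlbflush.h; the per-CPU accessor used differs
 * between kernel versions):
 *
 *	static inline void arch_enter_lazy_mmu_mode(void)
 *	{
 *		struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
 *
 *		batch->active = 1;
 *	}
 *
 *	static inline void arch_leave_lazy_mmu_mode(void)
 *	{
 *		struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
 *
 *		if (batch->index)
 *			__flush_tlb_pending(batch);
 *		batch->active = 0;
 *	}
 */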

void tlb_flush(struct mmu_gather *tlb)
{
	struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);

	/* If there's a TLB batch pending, then we must flush it because the
	 * pages are going to be freed and we really don't want to have a CPU
	 * access a freed page because it has a stale TLB.
	 */
	if (tlbbatch->index)
		__flush_tlb_pending(tlbbatch);

	put_cpu_var(ppc64_tlb_batch);
}
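
/*
 * tlb_flush() above is reached through the generic mmu_gather machinery
 * when an unmap completes. A sketched caller sequence follows; the exact
 * tlb_gather_mmu()/tlb_finish_mmu() signatures vary by kernel version:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	// ... unmap pages, accumulating them in the gather ...
 *	tlb_finish_mmu(&tlb, start, end);	// ends up calling tlb_flush()
 */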

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB), while
 *                            keeping the linux PTEs intact.
 *
 * @mm		: mm_struct of the target address space (generally init_mm)
 * @start	: starting address
 * @end         : ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it is implemented for small size rather
 * than speed.
 */
void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	int hugepage_shift;
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start,
							&hugepage_shift);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (hugepage_shift)
			trace_hugepage_invalidate(start, pte);
		if (!(pte & _PAGE_HASHPTE))
			continue;
		if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)ptep)))
			hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
		else
			hpte_need_flush(mm, start, ptep, pte, 0);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
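
/*
 * Hypothetical usage sketch for the IO hotplug case described above:
 * flushing the hash entries backing a 64K window of IO space before the
 * bridge behind it goes away. The variable name, address and size below
 * are made up for illustration:
 *
 *	unsigned long ea = removed_bridge_io_base;
 *
 *	__flush_hash_table_range(&init_mm, ea, ea + 0x10000);
 */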

void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;
	pte_t *start_pte;
	unsigned long flags;

	addr = _ALIGN_DOWN(addr, PMD_SIZE);
	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	start_pte = pte_offset_map(pmd, addr);
	for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
		unsigned long pteval = pte_val(*pte);
		if (pteval & _PAGE_HASHPTE)
			hpte_need_flush(mm, addr, pte, pteval, 0);
		addr += PAGE_SIZE;
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}

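/*
 * Hypothetical usage sketch for flush_tlb_pmd_range(): flush the hash
 * entries for every base page under one PMD, for instance before the
 * range is remapped. The caller below is illustrative, not an actual
 * call site:
 *
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *
 *	flush_tlb_pmd_range(mm, pmd, addr);
 */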