/* include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, then free the page. Because the
 * disabling of IRQs delays the completion of the TLB flush, we can never
 * observe an already freed page.
 *
 * Architectures that do not have this (e.g. PPC) need to delay the freeing
 * by some other means; this batching is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage, and this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing; see
 * the implementation of tlb_remove_table_one().
 */
struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#endif
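
/*
 * Illustrative sketch (an assumption, not something defined by this header):
 * an architecture selecting CONFIG_HAVE_RCU_TABLE_FREE would typically route
 * its page-table pages through tlb_remove_table() from its __pte_free_tlb()
 * implementation rather than freeing them directly, e.g.:
 *
 *	static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 *					  unsigned long address)
 *	{
 *		pgtable_page_dtor(pte);
 *		tlb_remove_table(tlb, pte);
 *	}
 *
 * The batched tables are then freed after a sched-RCU grace period; if no
 * batch storage can be allocated, tlb_remove_table() falls back to freeing
 * one table at a time, as described above.
 */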

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
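
/*
 * Worked example (assuming 4K pages on a 64-bit build): struct
 * mmu_gather_batch is 16 bytes, so MAX_GATHER_BATCH is (4096 - 16) / 8 = 510
 * page pointers per batch, and MAX_GATHER_BATCH_COUNT is 10000 / 510 = 19
 * batches, i.e. roughly 9700 pages (about 38MB of 4K pages) per flush cycle.
 */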

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif
	unsigned long		start;
	unsigned long		end;
	/* we are in the middle of an operation to clear
	 * a full mm and can make some optimizations */
	unsigned int		fullmm : 1,
	/* we have performed an operation which
	 * requires a complete flush of the tlb */
				need_flush_all : 1;

	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];
	unsigned int		batch_count;
};

#define HAVE_GENERIC_MMU_GATHER

void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
							unsigned long end);
int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}
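
/*
 * A minimal sketch of the typical mmu_gather lifecycle as driven by generic
 * unmap code (the loop below is illustrative; the real callers live in
 * mm/memory.c):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	for (each page being unmapped) {
 *		... clear the pte ...
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);
 *		tlb_remove_page(&tlb, page);	// may flush when a batch fills
 *	}
 *	tlb_finish_mmu(&tlb, start, end);	// final TLB flush + page freeing
 *
 * tlb_flush_mmu() can also be called directly to force an intermediate flush
 * of whatever has been batched so far.
 */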

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + PAGE_SIZE);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
}

/*
 * For the per-vma TLB handling, these hooks can be optimised away when we're
 * doing a full MM flush.  When we're doing a munmap, the vmas are adjusted
 * to only cover the region being torn down.
 */
#ifndef tlb_start_vma
#define tlb_start_vma(tlb, vma) do { } while (0)
#endif

#define __tlb_end_vma(tlb, vma)					\
	do {							\
		if (!tlb->fullmm && tlb->end) {			\
			tlb_flush(tlb);				\
			__tlb_reset_range(tlb);			\
		}						\
	} while (0)

#ifndef tlb_end_vma
#define tlb_end_vma	__tlb_end_vma
#endif
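
/*
 * A brief usage sketch (illustrative only; the real caller is the unmap path
 * in mm/memory.c, and the shape shown here is an assumption):
 *
 *	tlb_start_vma(tlb, vma);
 *	... walk the page tables for [addr, end) and zap entries, using
 *	    tlb_remove_tlb_entry() and tlb_remove_page() ...
 *	tlb_end_vma(tlb, vma);
 *
 * With the generic definitions above, tlb_end_vma() flushes whatever range
 * was accumulated for this vma (unless this is a full-mm teardown) and then
 * resets the range for the next vma.
 */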

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate.  This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		__tlb_adjust_range(tlb, address);		\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
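
/*
 * Worked example of the range tracking (assuming 4K pages and not a full-mm
 * gather): after __tlb_reset_range(), tlb->start == TASK_SIZE and
 * tlb->end == 0, i.e. an empty range.  Recording an unmap at 0x1000 via
 * tlb_remove_tlb_entry() then yields tlb->start == 0x1000 and
 * tlb->end == 0x2000, so the eventual tlb_flush() only needs to cover the
 * pages that were actually touched.
 */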

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 *
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)		\
	do {							\
		__tlb_adjust_range(tlb, address);		\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);	\
	} while (0)

#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		__tlb_adjust_range(tlb, address);		\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address);		\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address);		\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
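
/*
 * A minimal sketch of how the *_free_tlb() helpers are used when empty page
 * tables are torn down (the helper below is an illustrative assumption
 * modelled on free_pte_range() in mm/memory.c, not something defined here):
 *
 *	static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
 *				   unsigned long addr)
 *	{
 *		pgtable_t token = pmd_pgtable(*pmd);
 *		pmd_clear(pmd);
 *		pte_free_tlb(tlb, token, addr);
 *	}
 *
 * Passing the address keeps tlb->start/tlb->end covering the region whose
 * page-table pages are being released, so the final flush also invalidates
 * any cached intermediate (paging-structure) entries for it.
 */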

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */