This source file includes the following definitions:
- not_found
- map_pte
- pfn_in_hpage
- check_pte
- page_vma_mapped_walk
- page_mapped_in_vma
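
For context, each function below threads its state through struct
page_vma_mapped_walk, declared in include/linux/rmap.h. The sketch below is
reconstructed from how the fields are used in this file rather than copied
from this exact tree's headers, so treat it as illustrative:

struct page_vma_mapped_walk {
        struct page *page;              /* page (or THP head) to look for */
        struct vm_area_struct *vma;     /* VMA to search */
        unsigned long address;          /* current address within @vma */
        pmd_t *pmd;                     /* set on a PMD-level match */
        pte_t *pte;                     /* set on a PTE-level match */
        spinlock_t *ptl;                /* page table lock held on success */
        unsigned int flags;             /* PVMW_SYNC and/or PVMW_MIGRATION */
};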
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
        page_vma_mapped_walk_done(pvmw);
        return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
        pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
        if (!(pvmw->flags & PVMW_SYNC)) {
                if (pvmw->flags & PVMW_MIGRATION) {
                        if (!is_swap_pte(*pvmw->pte))
                                return false;
                } else {
                        /*
                         * We get here when we are trying to unmap a private
                         * device page from the process address space. Such
                         * a page is not CPU accessible and thus is mapped as
                         * a special swap entry; nonetheless it still counts
                         * as a valid regular mapping for the page (and is
                         * accounted as such in the page's map count).
                         *
                         * So handle this special case as if it were a normal
                         * page mapping, i.e. lock the CPU page table and
                         * return true.
                         */
                        if (is_swap_pte(*pvmw->pte)) {
                                swp_entry_t entry;

                                /* Handle un-addressable ZONE_DEVICE memory */
                                entry = pte_to_swp_entry(*pvmw->pte);
                                if (!is_device_private_entry(entry))
                                        return false;
                        } else if (!pte_present(*pvmw->pte))
                                return false;
                }
        }
        pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
        spin_lock(pvmw->ptl);
        return true;
}

static inline bool pfn_in_hpage(struct page *hpage, unsigned long pfn)
{
        unsigned long hpage_pfn = page_to_pfn(hpage);

        /* THP can be referenced by any subpage */
        return pfn >= hpage_pfn && pfn - hpage_pfn < hpage_nr_pages(hpage);
}

/**
 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, holding the pte and page to check
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
 * arbitrary page.
 *
 * If PVMW_MIGRATION is set, returns true if @pvmw->pte contains a migration
 * entry that points to @pvmw->page or, in the THP case, to any subpage of it.
 *
 * If PVMW_MIGRATION is not set, returns true if @pvmw->pte points to
 * @pvmw->page or, in the THP case, to any subpage of it.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
        unsigned long pfn;

        if (pvmw->flags & PVMW_MIGRATION) {
                swp_entry_t entry;

                if (!is_swap_pte(*pvmw->pte))
                        return false;
                entry = pte_to_swp_entry(*pvmw->pte);

                if (!is_migration_entry(entry))
                        return false;

                pfn = migration_entry_to_pfn(entry);
        } else if (is_swap_pte(*pvmw->pte)) {
                swp_entry_t entry;

                /* Handle un-addressable ZONE_DEVICE memory */
                entry = pte_to_swp_entry(*pvmw->pte);
                if (!is_device_private_entry(entry))
                        return false;

                pfn = device_private_entry_to_pfn(entry);
        } else {
                if (!pte_present(*pvmw->pte))
                        return false;

                pfn = pte_pfn(*pvmw->pte);
        }

        return pfn_in_hpage(pvmw->page, pfn);
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() has returned
 * false, use page_vma_mapped_walk_done(); it will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
        struct mm_struct *mm = pvmw->vma->vm_mm;
        struct page *page = pvmw->page;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t pmde;

        /* The only possible pmd mapping has been handled on last iteration */
        if (pvmw->pmd && !pvmw->pte)
                return not_found(pvmw);

        if (pvmw->pte)
                goto next_pte;

        if (unlikely(PageHuge(pvmw->page))) {
                /* when pud is not present, pte will be NULL */
                pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
                if (!pvmw->pte)
                        return false;

                pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
                spin_lock(pvmw->ptl);
                if (!check_pte(pvmw))
                        return not_found(pvmw);
                return true;
        }
restart:
        pgd = pgd_offset(mm, pvmw->address);
        if (!pgd_present(*pgd))
                return false;
        p4d = p4d_offset(pgd, pvmw->address);
        if (!p4d_present(*p4d))
                return false;
        pud = pud_offset(p4d, pvmw->address);
        if (!pud_present(*pud))
                return false;
        pvmw->pmd = pmd_offset(pud, pvmw->address);
        /*
         * Make sure the pmd value isn't cached in a register by the
         * compiler and used as a stale value after we've observed a
         * subsequent update.
         */
        pmde = READ_ONCE(*pvmw->pmd);
        if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
                pvmw->ptl = pmd_lock(mm, pvmw->pmd);
                if (likely(pmd_trans_huge(*pvmw->pmd))) {
                        if (pvmw->flags & PVMW_MIGRATION)
                                return not_found(pvmw);
                        if (pmd_page(*pvmw->pmd) != page)
                                return not_found(pvmw);
                        return true;
                } else if (!pmd_present(*pvmw->pmd)) {
                        if (thp_migration_supported()) {
                                if (!(pvmw->flags & PVMW_MIGRATION))
                                        return not_found(pvmw);
                                if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
                                        swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

                                        if (migration_entry_to_page(entry) != page)
                                                return not_found(pvmw);
                                        return true;
                                }
                        }
                        return not_found(pvmw);
                } else {
                        /* THP pmd was split under us: handle on pte level */
                        spin_unlock(pvmw->ptl);
                        pvmw->ptl = NULL;
                }
        } else if (!pmd_present(pmde)) {
                return false;
        }
        if (!map_pte(pvmw))
                goto next_pte;
        while (1) {
                if (check_pte(pvmw))
                        return true;
next_pte:
                /* Seek to next pte only makes sense for THP */
                if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
                        return not_found(pvmw);
                do {
                        pvmw->address += PAGE_SIZE;
                        if (pvmw->address >= pvmw->vma->vm_end ||
                            pvmw->address >=
                                        __vma_address(pvmw->page, pvmw->vma) +
                                        hpage_nr_pages(pvmw->page) * PAGE_SIZE)
                                return not_found(pvmw);
                        /* Did we cross page table boundary? */
                        if (pvmw->address % PMD_SIZE == 0) {
                                pte_unmap(pvmw->pte);
                                if (pvmw->ptl) {
                                        spin_unlock(pvmw->ptl);
                                        pvmw->ptl = NULL;
                                }
                                goto restart;
                        } else {
                                pvmw->pte++;
                        }
                } while (pte_none(*pvmw->pte));

                if (!pvmw->ptl) {
                        pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
                        spin_lock(pvmw->ptl);
                }
        }
}
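
/*
 * Illustrative caller sketch, not part of this file: the name
 * count_page_mappings_in_vma() is hypothetical, modelled on the rmap
 * walkers in mm/rmap.c. It shows the loop contract documented above:
 * each successful iteration returns with pvmw.ptl held and pvmw.pte
 * (or pvmw.pmd, for a PMD mapping) pointing at a mapping of @page;
 * the final false return has already unlocked and unmapped everything.
 */
static int count_page_mappings_in_vma(struct page *page,
                                      struct vm_area_struct *vma,
                                      unsigned long address)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .address = address,
        };
        int mapcount = 0;

        /*
         * A PTE-mapped THP yields one iteration per mapped subpage; a
         * PMD-mapped THP yields exactly one iteration with pvmw.pte == NULL.
         * Call page_vma_mapped_walk_done() instead if breaking out early.
         */
        while (page_vma_mapped_walk(&pvmw))
                mapcount++;

        return mapcount;
}
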
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA. Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .flags = PVMW_SYNC,
        };
        unsigned long start, end;

        start = __vma_address(page, vma);
        end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

        /* Bail out if the page's range does not overlap the VMA at all */
        if (unlikely(end < vma->vm_start || start >= vma->vm_end))
                return 0;
        pvmw.address = max(start, vma->vm_start);
        if (!page_vma_mapped_walk(&pvmw))
                return 0;
        page_vma_mapped_walk_done(&pvmw);
        return 1;
}
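
/*
 * Hypothetical usage sketch, not part of this file; the typical in-tree
 * caller of page_mapped_in_vma() is the hwpoison code in
 * mm/memory-failure.c. This stripped-down variant only demonstrates the
 * call. Note that the PVMW_SYNC flag above makes map_pte() take the PTE
 * lock unconditionally instead of first peeking at the entry unlocked.
 */
static void report_mapping(struct page *page, struct vm_area_struct *vma)
{
        if (page_mapped_in_vma(page, vma))
                pr_info("pfn %#lx is mapped in vma %px\n",
                        page_to_pfn(page), vma);
}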