This source file includes the following definitions:
- pcpu_chunk_page
- pcpu_get_pages
- pcpu_free_pages
- pcpu_alloc_pages
- pcpu_pre_unmap_flush
- __pcpu_unmap_pages
- pcpu_unmap_pages
- pcpu_post_unmap_tlb_flush
- __pcpu_map_pages
- pcpu_map_pages
- pcpu_post_map_flush
- pcpu_populate_chunk
- pcpu_depopulate_chunk
- pcpu_create_chunk
- pcpu_destroy_chunk
- pcpu_addr_to_page
- pcpu_verify_alloc_info
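
/*
 * mm/percpu-vm.c - vmalloc area based chunk allocation
 *
 * Chunks are mapped into vmalloc areas and populated page by page.
 * This is the default chunk allocator.
 */
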
static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
				    unsigned int cpu, int page_idx)
{
	/* must not be used on pre-mapped chunk */
	WARN_ON(chunk->immutable);

	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}
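
/**
 * pcpu_get_pages - get temp pages array
 *
 * Returns pointer to array of pointers to struct page which can be indexed
 * with pcpu_page_idx().  Note that there is only one array and accesses
 * should be serialized by pcpu_alloc_mutex.
 *
 * RETURNS:
 * Pointer to temp pages array on success, NULL on failure.
 */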
static struct page **pcpu_get_pages(void)
{
	static struct page **pages;
	size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);

	lockdep_assert_held(&pcpu_alloc_mutex);

	if (!pages)
		pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL);
	return pages;
}
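
/**
 * pcpu_free_pages - free pages which were allocated for @chunk
 * @chunk: chunk pages were allocated for
 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
 * @page_start: page index of the first page to be freed
 * @page_end: page index of the last page to be freed + 1
 *
 * Free pages [@page_start,@page_end) in @pages for all units.
 */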
static void pcpu_free_pages(struct pcpu_chunk *chunk,
			    struct page **pages, int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page = pages[pcpu_page_idx(cpu, i)];

			if (page)
				__free_page(page);
		}
	}
}
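
/**
 * pcpu_alloc_pages - allocate pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 * @gfp: allocation flags passed to the underlying allocator
 *
 * Allocate pages [@page_start,@page_end) into @pages for all units.
 * On failure, any pages already allocated are freed and -ENOMEM is
 * returned.
 */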
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, int page_start, int page_end,
			    gfp_t gfp)
{
	unsigned int cpu, tcpu;
	int i;

	gfp |= __GFP_HIGHMEM;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
			if (!*pagep)
				goto err;
		}
	}
	return 0;

err:
	while (--i >= page_start)
		__free_page(pages[pcpu_page_idx(cpu, i)]);

	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		for (i = page_start; i < page_end; i++)
			__free_page(pages[pcpu_page_idx(tcpu, i)]);
	}
	return -ENOMEM;
}
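
/**
 * pcpu_pre_unmap_flush - flush cache prior to unmapping
 * @chunk: chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages in [@page_start,@page_end) of @chunk are about to be
 * unmapped.  Flush cache.  As each flush can be very expensive, the
 * flush is issued on the whole region at once rather than per cpu.
 */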
static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	flush_cache_vunmap(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
{
	unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
}
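
/**
 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array which is filled with the unmapped pages
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * The unmapped pages are stored into @pages so that they can later be
 * handed to pcpu_free_pages().  The caller is responsible for calling
 * the proper pre/post flush functions.
 */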
static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
			     struct page **pages, int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page;

			page = pcpu_chunk_page(chunk, cpu, i);
			WARN_ON(!page);
			pages[pcpu_page_idx(cpu, i)] = page;
		}
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				   page_end - page_start);
	}
}
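
/**
 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been unmapped.  Flush
 * TLB for the regions.  This can be skipped if the area is to be
 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
 *
 * As with pcpu_pre_unmap_flush(), the flush is done at once for the
 * whole region rather than per cpu.
 */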
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
				      int page_start, int page_end)
{
	flush_tlb_kernel_range(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

static int __pcpu_map_pages(unsigned long addr, struct page **pages,
			    int nr_pages)
{
	return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
					PAGE_KERNEL, pages);
}
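
/**
 * pcpu_map_pages - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array containing pages to be mapped
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.  The
 * caller is responsible for calling pcpu_post_map_flush() after all
 * mappings are complete.
 *
 * This function is also responsible for setting up the page -> chunk
 * reverse lookup used by pcpu_addr_to_page().
 */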
static int pcpu_map_pages(struct pcpu_chunk *chunk,
			  struct page **pages, int page_start, int page_end)
{
	unsigned int cpu, tcpu;
	int i, err;

	for_each_possible_cpu(cpu) {
		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				       &pages[pcpu_page_idx(cpu, page_start)],
				       page_end - page_start);
		if (err < 0)
			goto err;

		for (i = page_start; i < page_end; i++)
			pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
					    chunk);
	}
	return 0;
err:
	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
				   page_end - page_start);
	}
	pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
	return err;
}
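
/**
 * pcpu_post_map_flush - flush cache after mapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been mapped.  Flush
 * cache, at once for the whole region.
 */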
static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
				int page_start, int page_end)
{
	flush_cache_vmap(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}
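
/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: the start page
 * @page_end: the end page
 * @gfp: allocation flags passed to the underlying memory allocator
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */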
static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
			       int page_start, int page_end, gfp_t gfp)
{
	struct page **pages;

	pages = pcpu_get_pages();
	if (!pages)
		return -ENOMEM;

	if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp))
		return -ENOMEM;

	if (pcpu_map_pages(chunk, pages, page_start, page_end)) {
		pcpu_free_pages(chunk, pages, page_start, page_end);
		return -ENOMEM;
	}
	pcpu_post_map_flush(chunk, page_start, page_end);

	return 0;
}
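
/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @page_start: the start page
 * @page_end: the end page
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */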
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
				  int page_start, int page_end)
{
	struct page **pages;

	/*
	 * If control reaches here, there must have been at least one
	 * successful population attempt so the temp pages array must
	 * be available now.
	 */
	pages = pcpu_get_pages();
	BUG_ON(!pages);

	/* unmap and free */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	pcpu_unmap_pages(chunk, pages, page_start, page_end);

	/* no need to flush tlb, vmalloc will handle it lazily */

	pcpu_free_pages(chunk, pages, page_start, page_end);
}
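
/**
 * pcpu_create_chunk - create a new chunk backed by vmalloc areas
 * @gfp: allocation flags
 *
 * Allocate a chunk and reserve the matching group of vmalloc areas
 * for it.  Returns the new chunk on success, NULL on failure.
 */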
static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
{
	struct pcpu_chunk *chunk;
	struct vm_struct **vms;

	chunk = pcpu_alloc_chunk(gfp);
	if (!chunk)
		return NULL;

	vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
				pcpu_nr_groups, pcpu_atom_size);
	if (!vms) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	chunk->data = vms;
	chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];

	pcpu_stats_chunk_alloc();
	trace_percpu_create_chunk(chunk->base_addr);

	return chunk;
}
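
/**
 * pcpu_destroy_chunk - destroy a chunk created by pcpu_create_chunk()
 * @chunk: chunk to destroy, may be NULL
 *
 * Release the chunk's vmalloc areas and free the chunk itself.
 */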
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;

	pcpu_stats_chunk_dealloc();
	trace_percpu_destroy_chunk(chunk->base_addr);

	if (chunk->data)
		pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
	pcpu_free_chunk(chunk);
}
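
/* translate a percpu address within a chunk to its struct page */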
static struct page *pcpu_addr_to_page(void *addr)
{
	return vmalloc_to_page(addr);
}
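
/**
 * pcpu_verify_alloc_info - check alloc info is acceptable
 * @ai: percpu allocation info to verify
 *
 * The vmalloc-based chunk allocator imposes no extra restrictions on
 * the first-chunk allocation info, so any layout is acceptable.
 */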
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
	/* no extra restriction */
	return 0;
}