This source file includes the following definitions:
- uncached_ipi_visibility
- uncached_ipi_mc_drain
- uncached_add_chunk
- uncached_alloc_page
- uncached_free_page
- uncached_build_memmap
- uncached_init
/*
 * Simple uncached page allocator for ia64, built on the generic
 * allocator (genalloc).  A gen_pool is kept per node: each pool is
 * seeded at boot from the uncached (spill) ranges in the EFI memory
 * map and grown on demand by converting cacheable granules to
 * uncached granules.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/efi.h>
#include <linux/nmi.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>


extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);

struct uncached_pool {
	struct gen_pool *pool;
	struct mutex add_chunk_mutex;	/* serialize adding a converted chunk */
	int nchunks_added;		/* #of converted chunks added */
	atomic_t status;		/* smp called function's return status */
};

#define MAX_CONVERTED_CHUNKS_PER_NODE	2

struct uncached_pool uncached_pools[MAX_NUMNODES];

static void uncached_ipi_visibility(void *data)
{
	int status;
	struct uncached_pool *uc_pool = (struct uncached_pool *)data;

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if ((status != PAL_VISIBILITY_OK) &&
	    (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
		atomic_inc(&uc_pool->status);
}


static void uncached_ipi_mc_drain(void *data)
{
	int status;
	struct uncached_pool *uc_pool = (struct uncached_pool *)data;

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		atomic_inc(&uc_pool->status);
}
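
/*
 * Both handlers above run on the other CPUs via smp_call_function()
 * from uncached_add_chunk().  The caller zeroes uc_pool->status before
 * the broadcast and treats any non-zero value afterwards as "at least
 * one CPU failed its PAL call"; a sketch of that pattern, mirroring
 * the code further down in this file:
 *
 *	atomic_set(&uc_pool->status, 0);
 *	smp_call_function(uncached_ipi_visibility, uc_pool, 1);
 *	if (atomic_read(&uc_pool->status))
 *		goto failed;
 */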


/*
 * uncached_add_chunk
 *
 * @uc_pool: pool to add a new chunk of uncached memory to
 * @nid: node id of node to allocate memory from, or -1
 *
 * This is accomplished by first allocating a granule of cached memory
 * pages and then converting them to uncached memory pages.
 */
static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
	struct page *page;
	int status, i, nchunks_added = uc_pool->nchunks_added;
	unsigned long c_addr, uc_addr;

	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
		return -1;	/* interrupted by a signal */

	if (uc_pool->nchunks_added > nchunks_added) {
		/* someone added a new chunk while we were waiting */
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return 0;
	}

	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* attempt to allocate a granule's worth of cached memory pages */

	page = __alloc_pages_node(nid,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				IA64_GRANULE_SHIFT-PAGE_SHIFT);
	if (!page) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* convert the memory pages from cached to uncached */

	c_addr = (unsigned long)page_address(page);
	uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;

	/*
	 * There's a small race here where it's possible for someone to
	 * access the page through /dev/mem halfway through the conversion
	 * to uncached - not sure it's really worth bothering about
	 */
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		SetPageUncached(&page[i]);

	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
		atomic_set(&uc_pool->status, 0);
		smp_call_function(uncached_ipi_visibility, uc_pool, 1);
		if (atomic_read(&uc_pool->status))
			goto failed;
	} else if (status != PAL_VISIBILITY_OK)
		goto failed;

	preempt_disable();

	flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	/* flush the just introduced uncached translation from the TLB */
	local_flush_tlb_all();

	preempt_enable();

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		goto failed;
	atomic_set(&uc_pool->status, 0);
	smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
	if (atomic_read(&uc_pool->status))
		goto failed;

	/*
	 * The chunk of memory pages has been converted to uncached so now
	 * we can add it to the pool.
	 */
	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
	if (status)
		goto failed;

	uc_pool->nchunks_added++;
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return 0;

	/* failed to convert or add the chunk so give it back to the kernel */
failed:
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		ClearPageUncached(&page[i]);

	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return -1;
}
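
/*
 * Recap of the conversion protocol implemented above: mark every page
 * of the granule PageUncached, flush the new uncached range from all
 * TLBs, ask PAL for physical prefetch visibility on this and (if
 * required) all other CPUs, flush the local icache and TLB, then drain
 * pending memory transactions everywhere before publishing the granule
 * via gen_pool_add().  Any failure unwinds through "failed", returning
 * the still-cacheable granule to the page allocator.
 */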


/*
 * uncached_alloc_page
 *
 * @starting_nid: node id of node to start with, or -1 for the local node
 * @n_pages: number of contiguous pages to allocate
 *
 * Allocate the specified number of pages of uncached memory space,
 * starting with the requested node and falling back to the other nodes
 * in turn.  Returns the uncached memory address on success, 0 on failure.
 */
unsigned long uncached_alloc_page(int starting_nid, int n_pages)
{
	unsigned long uc_addr;
	struct uncached_pool *uc_pool;
	int nid;

	if (unlikely(starting_nid >= MAX_NUMNODES))
		return 0;

	if (starting_nid < 0)
		starting_nid = numa_node_id();
	nid = starting_nid;

	do {
		if (!node_state(nid, N_HIGH_MEMORY))
			continue;
		uc_pool = &uncached_pools[nid];
		if (uc_pool->pool == NULL)
			continue;
		do {
			uc_addr = gen_pool_alloc(uc_pool->pool,
						 n_pages * PAGE_SIZE);
			if (uc_addr != 0)
				return uc_addr;
		} while (uncached_add_chunk(uc_pool, nid) == 0);

	} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);

	return 0;
}
EXPORT_SYMBOL(uncached_alloc_page);
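
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * allocate four contiguous uncached pages, preferring the local node
 * (-1), and release them with the matching uncached_free_page() below.
 *
 *	unsigned long uc_addr;
 *
 *	uc_addr = uncached_alloc_page(-1, 4);
 *	if (uc_addr == 0)
 *		return -ENOMEM;		// no uncached memory on any node
 *	// ... use the uncached region at uc_addr ...
 *	uncached_free_page(uc_addr, 4);
 */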


/*
 * uncached_free_page
 *
 * @uc_addr: uncached address of the first page to free
 * @n_pages: number of contiguous pages to free
 *
 * Free the specified number of uncached pages back to the per-node pool.
 */
void uncached_free_page(unsigned long uc_addr, int n_pages)
{
	int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pools[nid].pool;

	if (unlikely(pool == NULL))
		return;

	/* bits 63..60 of the address must select the uncached region */
	if ((uc_addr & (0xFUL << 60)) != __IA64_UNCACHED_OFFSET)
		panic("uncached_free_page invalid address %lx\n", uc_addr);

	gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE);
}
EXPORT_SYMBOL(uncached_free_page);


/*
 * uncached_build_memmap
 *
 * @uc_start: uncached starting address of a chunk of uncached memory
 * @uc_end: uncached ending address of a chunk of uncached memory
 * @arg: ignored, (NULL argument passed in on call to efi_memmap_walk_uc())
 *
 * Called at boot time to build a map of pages that can be used for
 * memory special operations.
 */
static int __init uncached_build_memmap(u64 uc_start, u64 uc_end, void *arg)
{
	int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pools[nid].pool;
	size_t size = uc_end - uc_start;

	touch_softlockup_watchdog();

	if (pool != NULL) {
		memset((char *)uc_start, 0, size);
		(void) gen_pool_add(pool, uc_start, size, nid);
	}
	return 0;
}
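
/*
 * uncached_build_memmap() matches the efi_freemem_callback_t shape,
 * int (*)(u64 start, u64 end, void *arg), so efi_memmap_walk_uc() can
 * invoke it once per uncached range in the EFI memory map; returning 0
 * is assumed here to let the walk continue to the next range.
 */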


static int __init uncached_init(void)
{
	int nid;

	for_each_node_state(nid, N_ONLINE) {
		uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
		mutex_init(&uncached_pools[nid].add_chunk_mutex);
	}

	efi_memmap_walk_uc(uncached_build_memmap, NULL);
	return 0;
}

__initcall(uncached_init);
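
/*
 * Lifetime summary: uncached_init() creates a PAGE_SHIFT-granularity
 * gen_pool per online node and seeds it with the uncached (spill)
 * ranges found in the EFI memory map.  After boot, uncached_alloc_page()
 * grows a node's pool on demand, up to MAX_CONVERTED_CHUNKS_PER_NODE
 * converted granules, via uncached_add_chunk().  Freed pages go back to
 * the per-node pool, not to the kernel page allocator: there is no
 * reverse path converting uncached granules back to cacheable memory.
 */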