/*
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

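/*
 * Allocate pages for page tables: use the buddy allocator once the slab
 * allocator is available, otherwise fall back to bootmem during early boot.
 */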
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

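/*
 * Allocate a 16KB (four page) region third table and mark all entries
 * as empty.
 */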
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pud;
}

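/*
 * Allocate a 16KB (four page) segment table and mark all entries as empty.
 */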
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pmd;
}

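/*
 * Allocate a page table and mark all entries as invalid.
 */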
static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
					  PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_INVALID,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
#ifndef CONFIG_DEBUG_PAGEALLOC
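		/*
		 * Map a 2GB region third frame directly if EDAT2 is
		 * available and the remaining range is large enough and
		 * properly aligned.
		 */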
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
			pud_val(*pu_dir) = __pa(address) |
				_REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
				(ro ? _REGION_ENTRY_PROTECT : 0);
			address += PUD_SIZE;
			continue;
		}
#endif
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
#ifndef CONFIG_DEBUG_PAGEALLOC
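		/*
		 * Map a 1MB segment frame directly if EDAT1 is available
		 * and the remaining range is large enough and properly
		 * aligned.
		 */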
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
			pmd_val(*pm_dir) = __pa(address) |
				_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
				_SEGMENT_ENTRY_YOUNG |
				(ro ? _SEGMENT_ENTRY_PROTECT : 0);
			address += PMD_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

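		/* Fall back to a regular 4KB page mapping. */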
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = __pa(address) |
			pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_INVALID;
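	/*
	 * Walk the page tables and invalidate all entries in the range.
	 * Large PUD and PMD mappings are removed by clearing the whole
	 * entry; regular mappings are invalidated page by page.
	 */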
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
		address += PAGE_SIZE;
	}
	flush_tlb_kernel_range(start, end);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/*
			 * Use 1MB frames for vmemmap if available. We
			 * always use large frames even if they are only
			 * partially used. Otherwise we would also have to
			 * allocate page tables, since vmemmap_populate gets
			 * called for each section separately.
			 */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) |
					_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

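		/* Allocate a backing page for this vmemmap entry if needed. */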
		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) =
				__pa(new_page) | pgprot_val(PAGE_KERNEL);
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

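/*
 * Nothing to do here: vmemmap memory is not freed.
 */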
void vmemmap_free(unsigned long start, unsigned long end)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

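/*
 * Remove the segment from the segment list and tear down its 1:1 mapping.
 * Called with vmem_mutex held.
 */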
static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

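/*
 * Remove a previously added memory mapping. The start address and size
 * must match a segment that was added with vmem_add_mapping().
 */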
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

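/*
 * Add a memory mapping for the given range: register it in the segment
 * list and create the corresponding 1:1 mapping.
 */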
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	struct memblock_region *reg;
	phys_addr_t start, end;

	ro_start = PFN_ALIGN((unsigned long)&_stext);
	ro_end = (unsigned long)&_eshared & PAGE_MASK;
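	/*
	 * Map each memory region, mapping the read-only kernel section
	 * between ro_start and ro_end read-only and everything else
	 * read-write.
	 */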
	for_each_memblock(memory, reg) {
		start = reg->base;
		end = reg->base + reg->size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);