/*
 *  Copyright (C) 2005,2006,2007,2008,2009,2010 Imagination Technologies
 *
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/initrd.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/user_gateway.h>
#include <asm/mmzone.h>
#include <asm/fixmap.h>

unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);

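/* The kernel's reference page table. */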
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data;

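/*
 * A page of zeroes, allocated in paging_init() and used wherever a
 * zeroed page needs to be mapped.
 */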
unsigned long empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

extern char __user_gateway_start;
extern char __user_gateway_end;

void *gateway_page;

/*
 * Insert the gateway page into a set of page tables, creating the
 * page tables if necessary.
 */
static void insert_gateway_page(pgd_t *pgd, unsigned long address)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	BUG_ON(!pgd_present(*pgd));

	pud = pud_offset(pgd, address);
	BUG_ON(!pud_present(*pud));

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd)) {
		pte = alloc_bootmem_pages(PAGE_SIZE);
		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
	}

	pte = pte_offset_kernel(pmd, address);
	set_pte(pte, pfn_pte(__pa(gateway_page) >> PAGE_SHIFT, PAGE_READONLY));
}

/* Alloc and map a page in a known location accessible to userspace. */
static void __init user_gateway_init(void)
{
	unsigned long address = USER_GATEWAY_PAGE;
	int offset = pgd_index(address);
	pgd_t *pgd;

	gateway_page = alloc_bootmem_pages(PAGE_SIZE);

	pgd = swapper_pg_dir + offset;
	insert_gateway_page(pgd, address);

#ifdef CONFIG_METAG_META12
	/*
	 * Insert the gateway page into our current page tables even
	 * though we've already inserted it into our reference page
	 * table (swapper_pg_dir). This is because with a META1 mmu we
	 * copy just the user address range and not the gateway page
	 * entry on context switch, see switch_mmu().
	 */
	pgd = (pgd_t *)mmu_get_base() + offset;
	insert_gateway_page(pgd, address);
#endif /* CONFIG_METAG_META12 */

	BUG_ON((&__user_gateway_end - &__user_gateway_start) > PAGE_SIZE);

	gateway_page += (address & ~PAGE_MASK);

	memcpy(gateway_page, &__user_gateway_start,
	       &__user_gateway_end - &__user_gateway_start);

	/*
	 * We don't need to flush the TLB here: there should be no mapping
	 * present at boot for this address, and only valid mappings are in
	 * the TLB (apart from on Meta 1.x, but those cached invalid
	 * mappings should be impossible to hit here).
	 *
	 * We don't flush the code cache here either, even though we have
	 * written code through the data cache and the two may not be
	 * coherent. At this point we assume there is no stale data in the
	 * code cache for this address, so there is no need to flush.
	 */
}

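/*
 * Allocate and initialise the pglist_data structure for node @nid.
 * With CONFIG_NEED_MULTIPLE_NODES the structure is allocated from
 * memblock, preferably below the node's own last PFN; otherwise the
 * statically allocated node 0 structure is used and only its PFN
 * range needs filling in.
 */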
static void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long phys;
#endif

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	phys = __memblock_alloc_base(sizeof(struct pglist_data),
				SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
	/* Retry with all of system memory */
	if (!phys)
		phys = __memblock_alloc_base(sizeof(struct pglist_data),
					     SMP_CACHE_BYTES,
					     memblock_end_of_DRAM());
	if (!phys)
		panic("Can't allocate pgdat for node %d\n", nid);

	NODE_DATA(nid) = __va(phys);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

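/*
 * Set up the bootmem allocator for one node: allocate the bootmem
 * bitmap, register the node's low-memory pages as free, and re-apply
 * the memblock reservations (currently only for node 0) so that
 * memory which is already in use is not handed out again.
 */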
static void __init bootmem_init_one_node(unsigned int nid)
{
	unsigned long total_pages, paddr;
	unsigned long end_pfn;
	struct pglist_data *p;

	p = NODE_DATA(nid);

	/* Nothing to do.. */
	if (!p->node_spanned_pages)
		return;

	end_pfn = pgdat_end_pfn(p);
#ifdef CONFIG_HIGHMEM
	if (end_pfn > max_low_pfn)
		end_pfn = max_low_pfn;
#endif

	total_pages = bootmem_bootmap_pages(end_pfn - p->node_start_pfn);

	paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
	if (!paddr)
		panic("Can't allocate bootmap for nid[%d]\n", nid);

	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/*
	 * XXX Handle initial reservations for the system memory node
	 * only for the moment; we'll refactor this later to handle
	 * reservations in other nodes.
	 */
	if (nid == 0) {
		struct memblock_region *reg;

		/* Reserve the sections we're already using. */
		for_each_memblock(reserved, reg) {
			unsigned long size = reg->size;

#ifdef CONFIG_HIGHMEM
			/* ...but not highmem */
			if (PFN_DOWN(reg->base) >= highstart_pfn)
				continue;

			if (PFN_UP(reg->base + size) > highstart_pfn)
				size = (highstart_pfn - PFN_DOWN(reg->base))
				       << PAGE_SHIFT;
#endif

			reserve_bootmem(reg->base, size, BOOTMEM_DEFAULT);
		}
	}

	sparse_memory_present_with_active_regions(nid);
}

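/*
 * Register every memblock memory region with node 0, bring node 0
 * online, perform any SoC-specific memory setup, then initialise
 * bootmem on each online node and set up the sparse memory model.
 */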
static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;
	int i;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, 0);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	soc_mem_setup();

	for_each_online_node(i)
		bootmem_init_one_node(i);

	sparse_init();
}

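/* Provided by the linker; everything below this address is part of the kernel image. */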
extern char _heap_start[];

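/*
 * Register low memory with memblock and reserve everything from the
 * base of RAM up to _heap_start so that the kernel image is never
 * handed out.  Highmem is added as well so that its struct pages get
 * initialised, but it stays reserved here and is only released in
 * mem_init().
 */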
static void __init init_and_reserve_mem(void)
{
	unsigned long start_pfn, heap_start;
	u64 base = min_low_pfn << PAGE_SHIFT;
	u64 size = (max_low_pfn << PAGE_SHIFT) - base;

	heap_start = (unsigned long) &_heap_start;

	memblock_add(base, size);

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(heap_start));

	/*
	 * Reserve the kernel text.
	 */
	memblock_reserve(base, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - base);

#ifdef CONFIG_HIGHMEM
	/*
	 * Add & reserve highmem, so page structures are initialised.
	 */
	base = highstart_pfn << PAGE_SHIFT;
	size = (highend_pfn << PAGE_SHIFT) - base;
	if (size) {
		memblock_add(base, size);
		memblock_reserve(base, size);
	}
#endif
}

#ifdef CONFIG_HIGHMEM
/*
 * Ensure we have allocated page tables in swapper_pg_dir for the
 * fixed mappings range from 'start' to 'end'.
 */
static void __init allocate_pgtables(unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pmd_index(vaddr);
	pgd = swapper_pg_dir + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pmd = (pmd_t *)pgd;
		for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
			vaddr += PMD_SIZE;

			if (!pmd_none(*pmd))
				continue;

			pte = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
			pmd_populate_kernel(&init_mm, pmd, pte);
		}
		j = 0;
	}
}

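/*
 * Build the kernel page tables for the fixmap region and for the
 * persistent kmap (PKMAP) window, and record the page table that
 * backs the kmap window in pkmap_page_table.
 */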
static void __init fixedrange_init(void)
{
	unsigned long vaddr, end;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Fixed mappings:
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	allocate_pgtables(vaddr, end);

	/*
	 * Permanent kmaps:
	 */
	vaddr = PKMAP_BASE;
	allocate_pgtables(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}
#endif /* CONFIG_HIGHMEM */

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/metag/kernel/setup.c.
 */
void __init paging_init(unsigned long mem_end)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	int nid;

	init_and_reserve_mem();

	memblock_allow_resize();

	memblock_dump_all();

	nodes_clear(node_online_map);

	init_new_context(&init_task, &init_mm);

	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	do_init_bootmem();
	mmu_init(mem_end);

#ifdef CONFIG_HIGHMEM
	fixedrange_init();
	kmap_init();
#endif

	/* Initialize the zero page to a bootmem page, already zeroed. */
	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);

	user_gateway_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

#ifdef CONFIG_HIGHMEM
		max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif
		pr_info("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
			nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}

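/*
 * Release boot memory to the page allocator: free the highmem pages
 * directly (bootmem never managed them), hand the rest over with
 * free_all_bootmem(), then print the memory summary.
 */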
void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	/*
	 * Explicitly reset zone->managed_pages because highmem pages are
	 * freed before calling free_all_bootmem().
	 */
	reset_all_zones_managed_pages();
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++)
		free_highmem_page(pfn_to_page(tmp));
#endif /* CONFIG_HIGHMEM */

	free_all_bootmem();
	mem_init_print_info(NULL);
	show_mem(0);
}

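/* Free (and poison) the memory occupied by __init code and data. */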
void free_initmem(void)
{
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
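/* Give the initrd pages back to the page allocator once the initramfs is no longer needed. */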
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			   "initrd");
}
#endif
