/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <as-layout.h>
#include <init.h>
#include <kern.h>
#include <kern_util.h>
#include <mem_user.h>
#include <os.h>

/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
EXPORT_SYMBOL(empty_zero_page);
/* allocated in paging_init and unchanged thereafter */
static unsigned long *empty_bad_page = NULL;

/*
 * Initialized during boot, and readonly for initializing page tables
 * afterwards
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Initialized at boot time, and readonly after that */
unsigned long long highmem;
int kmalloc_ok = 0;

/* Used during early boot */
static unsigned long brk_end;

void __init mem_init(void)
{
	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/* Map in the area just after the brk now that kmalloc is about
	 * to be turned on.
	 */
	brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
	map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
	free_bootmem(__pa(brk_end), uml_reserved - brk_end);
	uml_reserved = brk_end;

	/* this will put all low memory onto the freelists */
	free_all_bootmem();
	max_low_pfn = totalram_pages;
	max_pfn = totalram_pages;
	mem_init_print_info(NULL);
	kmalloc_ok = 1;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
		set_pmd(pmd, __pmd(_KERNPG_TABLE +
				   (unsigned long) __pa(pte)));
		if (pte != pte_offset_kernel(pmd, 0))
			BUG();
	}
}

static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
	pmd_t *pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
	if (pmd_table != pmd_offset(pud, 0))
		BUG();
#endif
}

static void __init fixrange_init(unsigned long start, unsigned long end,
				 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = pud_offset(pgd, vaddr);
		if (pud_none(*pud))
			one_md_table_init(pud);
		pmd = pmd_offset(pud, vaddr);
		for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
			one_page_table_init(pmd);
			vaddr += PMD_SIZE;
		}
		j = 0;
	}
}

static void __init fixaddr_user_init(void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
	long size = FIXADDR_USER_END - FIXADDR_USER_START;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	phys_t p;
	unsigned long v, vaddr = FIXADDR_USER_START;

	if (!size)
		return;

	fixrange_init(FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
	v = (unsigned long) alloc_bootmem_low_pages(size);
	memcpy((void *) v, (void *) FIXADDR_USER_START, size);
	p = __pa(v);
	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
		      p += PAGE_SIZE) {
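		/*
		 * Point each user-fixmap PTE at the bootmem copy of the
		 * host's vsyscall area, one read-only page at a time.
		 */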
		pgd = swapper_pg_dir + pgd_index(vaddr);
		pud = pud_offset(pgd, vaddr);
		pmd = pmd_offset(pud, vaddr);
		pte = pte_offset_kernel(pmd, vaddr);
		pte_set_val(*pte, p, PAGE_READONLY);
	}
#endif
}

void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES], vaddr;
	int i;

	empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
	empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
	for (i = 0; i < ARRAY_SIZE(zones_size); i++)
		zones_size[i] = 0;

	zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) -
		(uml_physmem >> PAGE_SHIFT);
	free_area_init(zones_size);

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

	fixaddr_user_init();
}

/*
 * This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */

void free_initmem(void)
{
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/* Allocate and free page tables. */

/*
 * A new page directory starts with the user entries cleared; the kernel
 * entries are copied from swapper_pg_dir so every address space shares
 * the kernel mappings.
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

	if (pgd) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD,
		       swapper_pg_dir + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long) pgd);
}

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;

	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
	return pte;
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
	if (!pte)
		return NULL;
	/*
	 * pgtable_page_ctor() initializes the struct page for use as a
	 * page table; back out the allocation if that fails.
	 */
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

#ifdef CONFIG_3_LEVEL_PGTABLES
/* Only needed with three-level page tables: hand back a zeroed pmd page. */
pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);

	if (pmd)
		memset(pmd, 0, PAGE_SIZE);

	return pmd;
}
#endif

/* Wrapper so the userspace (os-Linux) side of UML can call kmalloc. */
void *uml_kmalloc(int size, int flags)
{
	return kmalloc(size, flags);
}