/*
 * linux/arch/arm/mm/pgd.c
 *
 * Copyright (C) 1998-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <asm/cp15.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_ARM_LPAE
#define __pgd_alloc()	kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL)
#define __pgd_free(pgd)	kfree(pgd)
#else
#define __pgd_alloc()	(pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, 2)
#define __pgd_free(pgd)	free_pages((unsigned long)pgd, 2)
#endif

/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = __pgd_alloc();
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_ARM_LPAE
	/*
	 * Allocate PMD table for modules and pkmap mappings.
	 */
	new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
			    MODULES_VADDR);
	if (!new_pud)
		goto no_pud;

	new_pmd = pmd_alloc(mm, new_pud, 0);
	if (!new_pmd)
		goto no_pmd;
#endif

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors. The vectors are always high
		 * with LPAE.
		 */
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

#ifndef CONFIG_ARM_LPAE
		/*
		 * Modify the PTE pointer to have the correct domain. This
		 * needs to be the vectors domain to avoid the low vectors
		 * being unmapped.
		 */
		pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
		pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
#endif

		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte + 0, init_pte[0], 0);
		set_pte_ext(new_pte + 1, init_pte[1], 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
	mm_dec_nr_pmds(mm);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	__pgd_free(new_pgd);
no_pgd:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

	pgd = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	pud = pud_offset(pgd, 0);
	if (pud_none_or_clear_bad(pud))
		goto no_pud;

	pmd = pmd_offset(pud, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	atomic_long_dec(&mm->nr_ptes);
no_pmd:
	pud_clear(pud);
	pmd_free(mm, pmd);
	mm_dec_nr_pmds(mm);
no_pud:
	pgd_clear(pgd);
	pud_free(mm, pud);
no_pgd:
#ifdef CONFIG_ARM_LPAE
	/*
	 * Free modules/pkmap or identity pmd tables.
	 */
	for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (pgd_val(*pgd) & L_PGD_SWAPPER)
			continue;
		pud = pud_offset(pgd, 0);
		if (pud_none_or_clear_bad(pud))
			continue;
		pmd = pmd_offset(pud, 0);
		pud_clear(pud);
		pmd_free(mm, pmd);
		mm_dec_nr_pmds(mm);
		pgd_clear(pgd);
		pud_free(mm, pud);
	}
#endif
	__pgd_free(pgd_base);
}
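
/*
 * For context, a minimal sketch of how the generic MM code is expected to
 * drive the two hooks above when an address space is created or destroyed.
 * It mirrors the mm_alloc_pgd()/mm_free_pgd() helpers in kernel/fork.c of
 * the same kernel era; the example_* names are illustrative only, and exact
 * naming and error handling vary between kernel versions.
 */
static inline int example_mm_alloc_pgd(struct mm_struct *mm)
{
	/* Allocate and initialise the per-process level-1 table. */
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void example_mm_free_pgd(struct mm_struct *mm)
{
	/* Tear down the vector-page tables (if any) and free the level-1 table. */
	pgd_free(mm, mm->pgd);
}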