root/arch/parisc/include/asm/pgalloc.h

DEFINITIONS

This source file includes the following definitions.
  1. pgd_alloc
  2. pgd_free
  3. pgd_populate
  4. pmd_alloc_one
  5. pmd_free
  6. pmd_populate_kernel

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/fixmap.h>

#include <asm/cache.h>

#include <asm-generic/pgalloc.h>        /* for pte_{alloc,free}_one */

/* Allocate the top level pgd (page directory)
 *
 * Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we
 * allocate the first pmd adjacent to the pgd.  This means that we can
 * subtract a constant offset to get to it.  The pmd and pgd sizes are
 * arranged so that a single pmd covers 4GB (giving a full 64-bit
 * process access to 8TB) so our lookups are effectively L2 for the
 * first 4GB of the kernel (i.e. for all ILP32 processes and all the
 * kernel for machines with under 4GB of memory) */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
                                               PGD_ALLOC_ORDER);
        pgd_t *actual_pgd = pgd;

        if (likely(pgd != NULL)) {
                memset(pgd, 0, PAGE_SIZE << PGD_ALLOC_ORDER);
#if CONFIG_PGTABLE_LEVELS == 3
                actual_pgd += PTRS_PER_PGD;
                /* Populate first pmd with allocated memory.  We mark it
                 * with PxD_FLAG_ATTACHED as a signal to the system that this
                 * pmd entry may not be cleared. */
                __pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT |
                                        PxD_FLAG_VALID |
                                        PxD_FLAG_ATTACHED)
                        + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
                /* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
                 * a signal that this pmd may not be freed */
                __pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
#endif
        }
        spin_lock_init(pgd_spinlock(actual_pgd));
        return actual_pgd;
}
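
/*
 * Layout sketch for the 3-level case (illustrative, derived from the
 * code above; not part of the original header): the single
 * __get_free_pages() allocation holds the attached first pmd followed
 * by the pgd proper.
 *
 *   allocation base ('pgd' above)     + PTRS_PER_PGD entries
 *   +--------------------------------+--------------------------------+
 *   | attached first pmd             | pgd ('actual_pgd', returned)   |
 *   +--------------------------------+--------------------------------+
 *
 * Because the offset is a compile-time constant, the attached pmd is
 * always reachable as (pmd_t *)(actual_pgd - PTRS_PER_PGD), and
 * pgd_free() below recovers the allocation base the same way before
 * calling free_pages().
 */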

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#if CONFIG_PGTABLE_LEVELS == 3
        pgd -= PTRS_PER_PGD;
#endif
        free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
}

#if CONFIG_PGTABLE_LEVELS == 3

/* Three Level Page Table Support for pmd's */

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
        __pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
                        (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
}
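
/*
 * Entry encoding sketch (illustrative; the real PxD_* constants live
 * in asm/pgtable.h): __pa(pmd) is page aligned, so after the right
 * shift by PxD_VALUE_SHIFT (assuming PxD_VALUE_SHIFT < PAGE_SHIFT,
 * which the flag layout implies) its low bits are still zero, which
 * is where the flag bits go.  The '+' above therefore acts like '|':
 *
 *   entry = (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
 *         + (__u32)(__pa(pmd) >> PxD_VALUE_SHIFT);
 *
 * The shift also compresses the physical address so the entry fits
 * the 32-bit value implied by the __u32 cast; lookups undo it by
 * shifting left again.
 */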

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);
        if (pmd)
                memset(pmd, 0, PAGE_SIZE << PMD_ORDER);
        return pmd;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
        if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
                /*
                 * This is the permanent pmd attached to the pgd;
                 * cannot free it.
                 * Increment the counter to compensate for the decrement
                 * done by generic mm code.
                 */
                mm_inc_nr_pmds(mm);
                return;
        }
        free_pages((unsigned long)pmd, PMD_ORDER);
}
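
/*
 * Illustrative teardown sequence (a sketch; the exact call site is in
 * the generic mm code and may differ by kernel version):
 *
 *   mm_dec_nr_pmds(mm);   // generic code accounts the pmd as gone
 *   pmd_free(mm, pmd);    // attached pmd: mm_inc_nr_pmds() above
 *                         // cancels that, leaving the count balanced
 */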

#else

/* Two Level Page Table Support for pmd's */

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */

#define pmd_alloc_one(mm, addr)         ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x)                 do { } while (0)
#define pgd_populate(mm, pmd, pte)      BUG()
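
/*
 * Note (inferred from the comment above, not original text): with two
 * levels the pmd is folded into the pgd, so the generic mm code should
 * never call these.  BUG() traps an unexpected call, and the
 * deliberately bogus non-NULL value ((pmd_t *)2) only exists to
 * satisfy the return type.
 */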

#endif

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#if CONFIG_PGTABLE_LEVELS == 3
        /* preserve the gateway marker if this is the beginning of
         * the permanent pmd */
        if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
                __pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
                                 PxD_FLAG_VALID |
                                 PxD_FLAG_ATTACHED)
                        + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
        else
#endif
                __pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
                        + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
}
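
/*
 * Why the marker matters (illustrative reasoning, not original text):
 * entry 0 of the permanent pmd was tagged PxD_FLAG_ATTACHED in
 * pgd_alloc(); if it were rewritten here without that flag, pmd_free()
 * above could no longer recognise the attached pmd and would
 * free_pages() memory that still belongs to the pgd allocation.
 */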

#define pmd_populate(mm, pmd, pte_page) \
        pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_pgtable(pmd) pmd_page(pmd)
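
/*
 * Usage note (a sketch of the generic contract, not original text):
 * pmd_populate() receives the pte table as a struct page and converts
 * it with page_address() before reusing pmd_populate_kernel();
 * pmd_pgtable() is the reverse mapping, returning that struct page
 * from a populated pmd entry.
 */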

#endif
