root/arch/x86/include/asm/pgtable_32.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_32_H
#define _ASM_X86_PGTABLE_32_H

#include <asm/pgtable_32_types.h>

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
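
/*
 * Illustrative sketch, not part of this header: with the mid levels
 * folded, a generic walk still steps through every level, but the folded
 * levels all resolve to the same entry, so on 2-level i386 only the pgd
 * and pte lookups touch real hardware tables.  Roughly:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	p4d_t *p4d = p4d_offset(pgd, addr);	// folded: same entry
 *	pud_t *pud = pud_offset(p4d, addr);	// folded: same entry
 *	pmd_t *pmd = pmd_offset(pud, addr);	// folded too when !PAE
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	...
 *	pte_unmap(pte);
 */
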
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <linux/threads.h>
#include <asm/paravirt.h>

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct mm_struct;
struct vm_area_struct;

extern pgd_t swapper_pg_dir[1024];
extern pgd_t initial_page_table[1024];
extern pmd_t initial_pg_pmd[];

void paging_init(void);
void sync_initial_page_table(void);

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(..)'
 */
#undef TEST_ACCESS_OK

#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address)				\
	((pte_t *)kmap_atomic(pmd_page(*(dir))) +		\
	 pte_index((address)))
#define pte_unmap(pte) kunmap_atomic((pte))
#else
#define pte_offset_map(dir, address)				\
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
#define pte_unmap(pte) do { } while (0)
#endif
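
/*
 * Usage sketch, illustrative only: because the CONFIG_HIGHPTE variant
 * maps the pte page with kmap_atomic(), the caller is in an atomic
 * context between map and unmap and must not sleep there.  The names
 * below (pmd, addr) are placeholders supplied by the caller:
 *
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	pte_t entry = *pte;		// read (or update) the entry
 *	pte_unmap(pte);			// drop the temporary mapping
 */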

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	__flush_tlb_one_kernel((vaddr));	\
} while (0)
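
/*
 * Usage sketch, illustrative only: a typical caller tears down a
 * temporary kernel mapping (e.g. a kmap slot) by clearing its PTE and
 * flushing that single kernel address from the TLB in one step.  The
 * names below are placeholders, not definitions from this file:
 *
 *	unsigned long vaddr = ...;	// kernel virtual address of the slot
 *	pte_t *ptep = ...;		// PTE that maps it
 *	kpte_clear_flush(ptep, vaddr);
 */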

#endif /* !__ASSEMBLY__ */

/*
 * kern_addr_valid() is (1) for FLATMEM and (0) for
 * SPARSEMEM and DISCONTIGMEM
 */
#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1)
#else
#define kern_addr_valid(kaddr)	(0)
#endif
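
/*
 * Illustrative note, not part of this header: on FLATMEM every kernel
 * address is assumed valid, so a caller that walks kernel memory can
 * simply gate a dereference on the macro.  'kaddr' is a placeholder:
 *
 *	if (kern_addr_valid(kaddr))
 *		val = *(unsigned long *)kaddr;
 */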

/*
 * This is how much memory in addition to the memory covered up to
 * and including _end we need mapped initially.
 * We need:
 *     (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
 *     (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
 *
 * Modulo rounding, each megabyte assigned here requires a kilobyte of
 * memory, which is currently unreclaimed.
 *
 * This should be a multiple of a page.
 *
 * KERNEL_IMAGE_SIZE should be greater than pa(_end)
 * and smaller than max_low_pfn, otherwise it will waste some page table
 * entries.
 */
#if PTRS_PER_PMD > 1
#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
#else
#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
#endif
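
/*
 * Worked example, illustrative only, assuming the usual values
 * (non-PAE: PTRS_PER_PGD = 1024; PAE: PTRS_PER_PMD = 512, PTRS_PER_PGD = 4):
 * mapping 512 MiB of lowmem is 131072 4 KiB pages, so
 * PAGE_TABLE_SIZE(131072) is 131072 / 1024 = 128 page-table pages without
 * PAE, and 131072 / 512 + 4 = 260 pages with PAE.  PAE needs twice as many
 * pte pages because its entries are 8 bytes, and the extra PTRS_PER_PGD
 * pages cover one mid-level table per top-level entry.
 */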

/*
 * Number of possible pages in the lowmem region.
 *
 * We shift 2 by 31 instead of 1 by 32 to the left in order to avoid a
 * gas warning about overflowing shift count when gas has been compiled
 * with only a host target support using a 32-bit type for internal
 * representation.
 */
#define LOWMEM_PAGES ((((_ULL(2)<<31) - __PAGE_OFFSET) >> PAGE_SHIFT))
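
/*
 * Worked example, illustrative only: with the default 3G/1G split,
 * __PAGE_OFFSET is 0xC0000000 and PAGE_SHIFT is 12, so
 * LOWMEM_PAGES = (0x100000000 - 0xC0000000) >> 12 = 0x40000000 >> 12
 * = 262144 pages, i.e. the 1 GiB of kernel virtual address space that
 * can hold lowmem.  2ULL << 31 yields the 64-bit constant 0x100000000
 * (the full 4 GiB), which a 32-bit "1 << 32" could not express.
 */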

#endif /* _ASM_X86_PGTABLE_32_H */
