arch/powerpc/mm/pgtable_64.c


DEFINITIONS

This source file includes the following definitions:
  1. pgd_page
  2. pud_page
  3. pmd_page
  4. mark_rodata_ro
  5. mark_initmem_nx

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  This file contains pgtable related functions for 64-bit machines.
 *
 *  Derived from arch/ppc64/mm/init.c
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include <mm/mmu_decl.h>


#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pud_cache_index;
EXPORT_SYMBOL(__pud_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
unsigned long __kernel_io_end;
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
#endif

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
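/*
 * Return the struct page backing a PGD entry: the mapped page itself for
 * a leaf (huge) entry, otherwise the page holding the PUD table it points to.
 */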
struct page *pgd_page(pgd_t pgd)
{
        if (pgd_is_leaf(pgd)) {
                VM_WARN_ON(!pgd_huge(pgd));
                return pte_page(pgd_pte(pgd));
        }
        return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

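/*
 * Same as above for a PUD entry: the mapped page for a leaf entry,
 * otherwise the page holding the PMD table it points to.
 */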
struct page *pud_page(pud_t pud)
{
        if (pud_is_leaf(pud)) {
                VM_WARN_ON(!pud_huge(pud));
                return pte_page(pud_pte(pud));
        }
        return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For a hugepage, the pmd holds the pfn; the low PTE_RPN_SHIFT bits are
 * used for flags. For a PTE page, the pmd holds a PTE_FRAG_SIZE (4K)
 * aligned virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
        if (pmd_is_leaf(pmd)) {
                VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
                return pte_page(pmd_pte(pmd));
        }
        return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_STRICT_KERNEL_RWX
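/*
 * Make kernel text and rodata read-only. Called late in boot; dispatches
 * to the radix or hash MMU implementation.
 */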
void mark_rodata_ro(void)
{
        if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
                pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
                return;
        }

        if (radix_enabled())
                radix__mark_rodata_ro();
        else
                hash__mark_rodata_ro();

        // mark_initmem_nx() should have already run by now
        ptdump_check_wx();
}

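/*
 * Remove execute permission from the init text once initmem is freed;
 * dispatches to the radix or hash MMU implementation.
 */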
void mark_initmem_nx(void)
{
        if (radix_enabled())
                radix__mark_initmem_nx();
        else
                hash__mark_initmem_nx();
}
#endif
