arch/powerpc/mm/book3s64/hash_hugetlbpage.c


DEFINITIONS

This source file includes the following definitions:
  1. __hash_page_huge
  2. huge_ptep_modify_prot_start
  3. huge_ptep_modify_prot_commit
  4. hugetlbpage_init_default

// SPDX-License-Identifier: GPL-2.0
/*
 * PPC64 Huge TLB Page Support for hash based MMUs (POWER4 and later)
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>

unsigned int hpage_shift;
EXPORT_SYMBOL(hpage_shift);

extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
                                  unsigned long pa, unsigned long rflags,
                                  unsigned long vflags, int psize, int ssize);

int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
                     pte_t *ptep, unsigned long trap, unsigned long flags,
                     int ssize, unsigned int shift, unsigned int mmu_psize)
{
        real_pte_t rpte;
        unsigned long vpn;
        unsigned long old_pte, new_pte;
        unsigned long rflags, pa;
        long slot, offset;

        BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);

        /* Search the Linux page table for a match with va */
        vpn = hpt_vpn(ea, vsid, ssize);

        /*
         * At this point, we have a pte (old_pte) which can be used to build
         * or update an HPTE. There are 2 cases:
         *
         * 1. There is a valid (present) pte with no associated HPTE (this is
         *      the most common case)
         * 2. There is a valid (present) pte with an associated HPTE. The
         *      current values of the pp bits in the HPTE prevent access
         *      because we are doing software DIRTY bit management and the
         *      page is currently not DIRTY.
         */


        do {
                old_pte = pte_val(*ptep);
                /* If PTE busy, retry the access */
                if (unlikely(old_pte & H_PAGE_BUSY))
                        return 0;
                /* If PTE permissions don't match, take page fault */
                if (unlikely(!check_pte_access(access, old_pte)))
                        return 1;

                /*
                 * Try to lock the PTE, add ACCESSED and DIRTY if it was
                 * a write access
                 */
                new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED;
                if (access & _PAGE_WRITE)
                        new_pte |= _PAGE_DIRTY;
        } while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
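
        /*
         * Editor's note: pte_xchg() is a compare-and-exchange on the PTE, so
         * the loop retries whenever another thread modified the PTE between
         * reading old_pte and attempting to set H_PAGE_BUSY.
         */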

        /* Make sure this is a hugetlb entry */
        if (old_pte & (H_PAGE_THP_HUGE | _PAGE_DEVMAP))
                return 0;

        rflags = htab_convert_pte_flags(new_pte);
        if (unlikely(mmu_psize == MMU_PAGE_16G))
                offset = PTRS_PER_PUD;
        else
                offset = PTRS_PER_PMD;
        rpte = __real_pte(__pte(old_pte), ptep, offset);

        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                /*
                 * No CPU has hugepages but lacks no execute, so we
                 * don't need to worry about that case
                 */
                rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
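
        /*
         * Editor's note: on an instruction-side fault
         * hash_page_do_lazy_icache() flushes the page from the
         * d-cache/i-cache; for data faults it instead sets the no-execute
         * bit in rflags so the flush can be deferred until the page is
         * actually executed.
         */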

        /* Check if pte already has an hpte (case 2) */
        if (unlikely(old_pte & H_PAGE_HASHPTE)) {
                /* There MIGHT be an HPTE for this pte */
                unsigned long gslot;

                gslot = pte_get_hash_gslot(vpn, shift, ssize, rpte, 0);
                if (mmu_hash_ops.hpte_updatepp(gslot, rflags, vpn, mmu_psize,
                                               mmu_psize, ssize, flags) == -1)
                        old_pte &= ~_PAGE_HPTEFLAGS;
        }
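
        /*
         * Editor's note: when hpte_updatepp() returns -1 no HPTE was found,
         * so clearing _PAGE_HPTEFLAGS above makes the block below treat this
         * as case 1 and insert a fresh HPTE.
         */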

        if (likely(!(old_pte & H_PAGE_HASHPTE))) {
                unsigned long hash = hpt_hash(vpn, shift, ssize);

                pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

                /* clear HPTE slot information in new PTE */
                new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;

                slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0,
                                             mmu_psize, ssize);

                /*
                 * Hypervisor failure. Restore old pte and return -1
                 * similar to __hash_page_*
                 */
                if (unlikely(slot == -2)) {
                        *ptep = __pte(old_pte);
                        hash_failure_debug(ea, access, vsid, trap, ssize,
                                           mmu_psize, mmu_psize, old_pte);
                        return -1;
                }

                new_pte |= pte_set_hidx(ptep, rpte, 0, slot, offset);
        }

        /*
         * No need to use ldarx/stdcx here
         */
        *ptep = __pte(new_pte & ~H_PAGE_BUSY);
        return 0;
}
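
/*
 * Editor's note: the return convention above matches the other __hash_page_*
 * helpers: 0 means the access was handled (or should simply be retried),
 * 1 means the PTE does not grant the requested access and a regular page
 * fault should be taken, and -1 means the HPTE insertion failed.
 */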

pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
                                  unsigned long addr, pte_t *ptep)
{
        unsigned long pte_val;
        /*
         * Clear the _PAGE_PRESENT so that no hardware parallel update is
         * possible. Also keep the pte_present true so that we don't take
         * a wrong fault.
         */
        pte_val = pte_update(vma->vm_mm, addr, ptep,
                             _PAGE_PRESENT, _PAGE_INVALID, 1);

        return __pte(pte_val);
}

void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
                                  pte_t *ptep, pte_t old_pte, pte_t pte)
{
        if (radix_enabled())
                return radix__huge_ptep_modify_prot_commit(vma, addr, ptep,
                                                           old_pte, pte);
        set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
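
/*
 * Editor's note: an illustrative sketch (variable names are hypothetical) of
 * how the generic hugetlb code pairs the two helpers above when changing the
 * protection of a mapping in place:
 *
 *      old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *      new_pte = huge_pte_modify(old_pte, newprot);
 *      huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 */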

void hugetlbpage_init_default(void)
{
        /*
         * Set the default large page size. Currently we pick 16M, 1M or 2M,
         * depending on what is available.
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                hpage_shift = mmu_psize_defs[MMU_PAGE_16M].shift;
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                hpage_shift = mmu_psize_defs[MMU_PAGE_1M].shift;
        else if (mmu_psize_defs[MMU_PAGE_2M].shift)
                hpage_shift = mmu_psize_defs[MMU_PAGE_2M].shift;
}
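
/*
 * Editor's note: a minimal sketch, using a hypothetical helper name that is
 * not part of this file, of how the default huge page size in bytes follows
 * from hpage_shift (e.g. 1UL << 24 = 16M when MMU_PAGE_16M is available).
 */
static inline unsigned long default_hugepage_size_bytes(void)
{
        return hpage_shift ? (1UL << hpage_shift) : 0;
}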
