arch/arc/mm/fault.c


DEFINITIONS

This source file includes the following definitions:
  1. handle_kernel_vaddr_fault
  2. do_page_fault

// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>

/*
 * Handle a fault on a kernel virtual address. Kernel virtual addresses are
 * required to implement vmalloc/pkmap/fixmap; refer to asm/processor.h for
 * the System Memory Map.
 *
 * This simply copies the PMD entry (pointer to a 2nd level page table or a
 * hugepage) from the swapper pgdir to the task pgdir; the 2nd level
 * table/page is thus shared between the two. (An illustrative sketch of how
 * such a fault arises follows the function.)
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;
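	/* The plain pointers walk this task's page tables; the _k variants
	 * walk the kernel 'reference' tables (the swapper pgdir) that
	 * vmalloc() and friends actually update.
	 */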

	pgd = pgd_offset_fast(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (!pgd_present(*pgd_k))
		goto bad_area;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		goto bad_area;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		goto bad_area;

	set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}
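
/*
 * Illustrative sketch (not part of the original file; the function below is
 * hypothetical, though vmalloc()/vfree() are the ordinary kernel APIs): how
 * a kernel-vaddr fault arises. vmalloc() installs its page tables only in
 * init_mm (the swapper pgdir); a task pgdir populated before the allocation
 * lacks the PMD, so the first touch faults and handle_kernel_vaddr_fault()
 * above copies the entry across lazily.
 */
#if 0	/* illustration only, not compiled */
#include <linux/vmalloc.h>

static void example_vmalloc_first_touch(void)
{
	u8 *buf = vmalloc(PAGE_SIZE);	/* populates init_mm page tables */

	if (buf)
		buf[0] = 0xa5;	/* first touch may take the fault path above */

	vfree(buf);
}
#endif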

void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int sig, si_code = SEGV_MAPERR;
	unsigned int write = 0, exec = 0, mask;
	vm_fault_t fault = VM_FAULT_SIGSEGV;	/* handle_mm_fault() output */
	unsigned int flags;			/* handle_mm_fault() input */

	/*
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= VMALLOC_START && !user_mode(regs)) {
		if (unlikely(handle_kernel_vaddr_fault(address)))
			goto no_context;
		else
			return;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

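	/*
	 * Decode the ARC Exception Cause Register (ECR): ecr_vec holds the
	 * exception vector and ecr_cause the cause code, distinguishing a
	 * store (ST/EX) protection violation from an instruction fetch one.
	 */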
	if (regs->ecr_cause & ECR_C_PROTV_STORE)	/* ST/EX */
		write = 1;
	else if ((regs->ecr_vec == ECR_V_PROTV) &&
		 (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
		exec = 1;

	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;
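	/*
	 * FAULT_FLAG_ALLOW_RETRY lets handle_mm_fault() drop mmap_sem and
	 * return VM_FAULT_RETRY (e.g. while waiting on I/O), while
	 * FAULT_FLAG_KILLABLE lets a fatal signal terminate the wait; both
	 * feed the retry logic further below.
	 */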

retry:
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (unlikely(address < vma->vm_start)) {
		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
			goto bad_area;
	}

	/*
	 * vm_area is good, now check permissions for this memory access
	 */
	mask = VM_READ;
	if (write)
		mask = VM_WRITE;
	if (exec)
		mask = VM_EXEC;

	if (!(vma->vm_flags & mask)) {
		si_code = SEGV_ACCERR;
		goto bad_area;
	}
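
	/*
	 * E.g. a store into a read-only mapping reaches this point with
	 * mask == VM_WRITE and takes the SEGV_ACCERR path above, whereas an
	 * access to an unmapped hole never gets here: the find_vma() checks
	 * fail and si_code stays SEGV_MAPERR.
	 */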

	fault = handle_mm_fault(vma, address, flags);

	/*
	 * Fault retry nuances
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {

		/*
		 * If the fault needs to be retried, handle any pending signals
		 * first (by returning to user mode).
		 * mmap_sem was already relinquished by core mm for the RETRY case
		 */
		if (fatal_signal_pending(current)) {
			if (!user_mode(regs))
				goto no_context;
			return;
		}
		/*
		 * retry state machine
		 */
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}
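
	/*
	 * On the retried attempt FAULT_FLAG_TRIED makes handle_mm_fault()
	 * block instead of returning VM_FAULT_RETRY again, so the
	 * fall-through to bad_area below always runs with mmap_sem still
	 * held.
	 */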

bad_area:
	up_read(&mm->mmap_sem);

	/*
	 * Major/minor page fault accounting
	 * (in case of retry we only land here once)
	 */
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}

		/* Normal return path: fault handled gracefully */
		return;
	}

	if (!user_mode(regs))
		goto no_context;
 181 
 182         if (fault & VM_FAULT_OOM) {
 183                 pagefault_out_of_memory();
 184                 return;
 185         }
 186 
 187         if (fault & VM_FAULT_SIGBUS) {
 188                 sig = SIGBUS;
 189                 si_code = BUS_ADRERR;
 190         }
 191         else {
 192                 sig = SIGSEGV;
 193         }
 194 
 195         tsk->thread.fault_address = address;
 196         force_sig_fault(sig, si_code, (void __user *)address);
 197         return;
 198 
 199 no_context:
 200         if (fixup_exception(regs))
 201                 return;
 202 
 203         die("Oops", regs, address);
 204 }
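
/*
 * Background note (not in the original file): fixup_exception() searches the
 * kernel exception table for the faulting instruction. Uaccess helpers such
 * as copy_{to,from}_user() record "faulting insn -> fixup" entries there, so
 * a kernel-mode fault on a bad user pointer resumes at the fixup code and
 * the helper reports failure instead of the kernel dying with an Oops.
 */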
