root/arch/riscv/mm/fault.c


DEFINITIONS

This source file includes the following definitions:
  1. do_page_fault

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */


#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>

#include <asm/pgalloc.h>
#include <asm/ptrace.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
        struct task_struct *tsk;
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        unsigned long addr, cause;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
        int code = SEGV_MAPERR;
        vm_fault_t fault;

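        /*
         * scause identifies the type of fault; sbadaddr holds the
         * faulting virtual address.
         */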
        cause = regs->scause;
        addr = regs->sbadaddr;

        tsk = current;
        mm = tsk->mm;

        /*
         * Fault-in kernel-space virtual memory on-demand.
         * The 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         */
        if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END)))
                goto vmalloc_fault;

        /* Enable interrupts if they were enabled in the parent context. */
        if (likely(regs->sstatus & SR_SPIE))
                local_irq_enable();

        /*
         * If we're in an interrupt, have no user context, or are running
         * in an atomic region, then we must not take the fault.
         */
        if (unlikely(faulthandler_disabled() || !mm))
                goto no_context;

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;

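        /* Account the fault with the perf page-fault software counter. */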
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

retry:
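        /*
         * Take mmap_sem for reading and find the VMA that covers the
         * faulting address, expanding the stack downwards if needed.
         */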
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, addr);
        if (unlikely(!vma))
                goto bad_area;
        if (likely(vma->vm_start <= addr))
                goto good_area;
        if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
                goto bad_area;
        if (unlikely(expand_stack(vma, addr)))
                goto bad_area;

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it.
         */
good_area:
        code = SEGV_ACCERR;

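        /*
         * Check that the VMA actually permits this kind of access:
         * execute for instruction faults, read for loads and write
         * for stores.
         */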
        switch (cause) {
        case EXC_INST_PAGE_FAULT:
                if (!(vma->vm_flags & VM_EXEC))
                        goto bad_area;
                break;
        case EXC_LOAD_PAGE_FAULT:
                if (!(vma->vm_flags & VM_READ))
                        goto bad_area;
                break;
        case EXC_STORE_PAGE_FAULT:
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                flags |= FAULT_FLAG_WRITE;
                break;
        default:
                panic("%s: unhandled cause %lu", __func__, cause);
        }

        /*
         * If for any reason at all we could not handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(vma, addr, flags);

        /*
         * If we need to retry but a fatal signal is pending, handle the
         * signal first. We do not need to release the mmap_sem because it
         * would already be released in __lock_page_or_retry in mm/filemap.c.
         */
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(tsk))
                return;

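        /*
         * Translate fatal fault results: out of memory goes to the OOM
         * path, bad accesses raise SIGBUS, anything else is a bug.
         */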
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }

        /*
         * Major/minor page fault accounting is only done on the
         * initial attempt. If we go through a retry, it is extremely
         * likely that the page will be found in page cache at that point.
         */
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        tsk->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
                                      1, regs, addr);
                } else {
                        tsk->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
                                      1, regs, addr);
                }
                if (fault & VM_FAULT_RETRY) {
                        /*
                         * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
                         * of starvation.
                         */
                        flags &= ~(FAULT_FLAG_ALLOW_RETRY);
                        flags |= FAULT_FLAG_TRIED;

                        /*
                         * No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
                        goto retry;
                }
        }

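        /* The fault was handled successfully; drop mmap_sem and return. */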
        up_read(&mm->mmap_sem);
        return;

        /*
         * Something tried to access memory that isn't in our memory map.
         * Fix it, but check if it's kernel or user first.
         */
bad_area:
        up_read(&mm->mmap_sem);
        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
                do_trap(regs, SIGSEGV, code, addr);
                return;
        }

no_context:
        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        bust_spinlocks(1);
        pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
                (addr < PAGE_SIZE) ? "NULL pointer dereference" :
                "paging request", addr);
        die(regs, "Oops");
        do_exit(SIGKILL);

        /*
         * We ran out of memory, call the OOM killer, and return to userspace
         * (which will retry the fault, or kill us if we got oom-killed).
         */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
        return;

do_sigbus:
        up_read(&mm->mmap_sem);
        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;
        do_trap(regs, SIGBUS, BUS_ADRERR, addr);
        return;

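/*
 * A fault in the vmalloc area: lazily synchronize this task's page
 * table with the kernel 'reference' page table, init_mm.pgd.
 */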
vmalloc_fault:
        {
                pgd_t *pgd, *pgd_k;
                pud_t *pud, *pud_k;
                p4d_t *p4d, *p4d_k;
                pmd_t *pmd, *pmd_k;
                pte_t *pte_k;
                int index;

                /* User mode accesses just cause a SIGSEGV */
                if (user_mode(regs))
                        return do_trap(regs, SIGSEGV, code, addr);

                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Do _not_ use "tsk->active_mm->pgd" here.
                 * We might be inside an interrupt in the middle
                 * of a task switch.
                 */
                index = pgd_index(addr);
                pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
                pgd_k = init_mm.pgd + index;

                if (!pgd_present(*pgd_k))
                        goto no_context;
                set_pgd(pgd, *pgd_k);

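                /*
                 * Walk the reference page table down through the
                 * intermediate levels; if any of them is missing, the
                 * vmalloc address is genuinely unmapped.
                 */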
                p4d = p4d_offset(pgd, addr);
                p4d_k = p4d_offset(pgd_k, addr);
                if (!p4d_present(*p4d_k))
                        goto no_context;

                pud = pud_offset(p4d, addr);
                pud_k = pud_offset(p4d_k, addr);
                if (!pud_present(*pud_k))
                        goto no_context;

                /*
                 * Since the vmalloc area is global, it is unnecessary
                 * to copy individual PTEs
                 */
                pmd = pmd_offset(pud, addr);
                pmd_k = pmd_offset(pud_k, addr);
                if (!pmd_present(*pmd_k))
                        goto no_context;
                set_pmd(pmd, *pmd_k);

                /*
                 * Make sure the actual PTE exists as well to
                 * catch kernel vmalloc-area accesses to non-mapped
                 * addresses. If we don't do this, this will just
                 * silently loop forever.
                 */
                pte_k = pte_offset_kernel(pmd_k, addr);
                if (!pte_present(*pte_k))
                        goto no_context;

                /*
                 * The kernel assumes that TLBs don't cache invalid
                 * entries, but in RISC-V, SFENCE.VMA specifies an
                 * ordering constraint, not a cache flush; it is
                 * necessary even after writing invalid entries.
                 */
                local_flush_tlb_page(addr);

                return;
        }
}
