/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>

static int handle_vmalloc_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd = pgd_offset_fast(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (!pgd_present(*pgd_k))
		goto bad_area;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		goto bad_area;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		goto bad_area;

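	/* Kernel mapping exists: copy the pmd entry into this task's page tables */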
	set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}

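/*
 * Entry point from the low-level TLB miss / protection violation exception
 * handlers: try to resolve the faulting address against the current mm,
 * otherwise deliver a signal (user mode) or take the exception fixup /
 * die() path (kernel mode).
 */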
void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	siginfo_t info;
	int fault, ret;
	int write = regs->ecr_cause & ECR_C_PROTV_STORE;  /* ST/EX */
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= VMALLOC_START && address <= VMALLOC_END) {
		ret = handle_vmalloc_fault(address);
		if (unlikely(ret))
			goto bad_area_nosemaphore;
		else
			return;
	}

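	/* Default to "no mapping" (SEGV_MAPERR); upgraded to SEGV_ACCERR once a vma is found */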
	info.si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
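	/*
	 * Address falls below the closest vma: only a stack vma marked
	 * VM_GROWSDOWN may be expanded downwards to cover it.
	 */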
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	info.si_code = SEGV_ACCERR;

	/* A protection violation on instruction fetch (execute from heap/stack) is always an access error */

	if ((regs->ecr_vec == ECR_V_PROTV) &&
	    (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
		goto bad_area;

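	/* Validate the access type (store vs load/ifetch) against the vma permissions */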
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	/* If the page fault was interrupted by SIGKILL, exit the handler "early" */
	if (unlikely(fatal_signal_pending(current))) {
		if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))
			up_read(&mm->mmap_sem);
		if (user_mode(regs))
			return;
	}

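	/* Account the fault for the perf software page-fault event */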
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			/* To avoid updating stats twice for retry case */
			if (fault & VM_FAULT_MAJOR) {
				tsk->maj_flt++;
				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
					      regs, address);
			} else {
				tsk->min_flt++;
				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
					      regs, address);
			}

			if (fault & VM_FAULT_RETRY) {
				flags &= ~FAULT_FLAG_ALLOW_RETRY;
				flags |= FAULT_FLAG_TRIED;
				goto retry;
			}
		}

		/* Fault Handled Gracefully */
		up_read(&mm->mmap_sem);
		return;
	}

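	/* handle_mm_fault() reported an error: dispatch on the failure type */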
	if (fault & VM_FAULT_OOM)
		goto out_of_memory;
	else if (fault & VM_FAULT_SIGSEGV)
		goto bad_area;
	else if (fault & VM_FAULT_SIGBUS)
		goto do_sigbus;

	/* no man's land */
	BUG();

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.fault_address = address;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
	 *  when it accesses user-memory. When it fails in one
	 *  of those points, we find it in a table and do a jump
	 *  to some fixup code that loads an appropriate error
	 *  code)
	 */
	if (fixup_exception(regs))
		return;

	die("Oops", regs, address);

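	/*
	 * VM_FAULT_OOM: hand user faults to the core OOM machinery; kernel
	 * faults fall back to the exception fixup / die() path above.
	 */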
out_of_memory:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		pagefault_out_of_memory();
		return;
	}

	goto no_context;

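	/* VM_FAULT_SIGBUS: kernel faults use the fixup path, user tasks get SIGBUS */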
do_sigbus:
	up_read(&mm->mmap_sem);

	if (!user_mode(regs))
		goto no_context;

	tsk->thread.fault_address = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGBUS, &info, tsk);
}