/*
 *  arch/cris/mm/fault.c
 *
 *  Copyright (C) 2000-2010  Axis Communications AB
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <asm/uaccess.h>
#include <arch/system.h>

extern int find_fixup_code(struct pt_regs *);
extern void die_if_kernel(const char *, struct pt_regs *, long);
extern void show_registers(struct pt_regs *regs);

/* debug of low-level TLB reload */
#undef DEBUG

#ifdef DEBUG
#define D(x) x
#else
#define D(x)
#endif

/* debug of higher-level faults */
#define DPG(x)

/* current active page directory */

DEFINE_PER_CPU(pgd_t *, current_pgd);
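/*
 * Page holding the signal-return trampolines when the stack is not
 * executable; faults on it are resolved like vmalloc faults, without
 * calling handle_mm_fault() (see do_page_fault() below).
 */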
unsigned long cris_signal_return_page;

/*
 * This routine handles page faults.  It determines the address
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * Notice that the address we're given is aligned to the page the fault
 * occurred in, since we only get the PFN in R_MMU_CAUSE, not the complete
 * address.
 *
 * protection:  0 means no page found, 1 means protection fault
 * writeaccess: 0 means read, 1 means write, 2 means execute
 *
 * On a bad access this routine signals the offending process (user
 * mode) or runs the exception fixup/oops path (kernel mode); it does
 * not return a value.
 */

asmlinkage void
do_page_fault(unsigned long address, struct pt_regs *regs,
	      int protection, int writeaccess)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	siginfo_t info;
	int fault;
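	/*
	 * Allow handle_mm_fault() to drop mmap_sem and signal a retry,
	 * and allow the wait for a locked page to be interrupted by a
	 * fatal signal.
	 */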
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	D(printk(KERN_DEBUG
		 "Page fault for %lX on %X at %lX, prot %d write %d\n",
		 address, smp_processor_id(), instruction_pointer(regs),
		 protection, writeaccess));

	tsk = current;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * NOTE2: This is done so that, when updating the vmalloc
	 * mappings, we don't have to walk all processes' pgdirs and
	 * add the high mappings all at once. Instead we do it as they
	 * are used. However, vmalloc'ed page entries have the PAGE_GLOBAL
	 * bit set, so sometimes the TLB can use a lingering entry.
	 *
	 * This check verifies that the fault happened in kernel space
	 * (!user_mode(regs)) and that it was not a protection fault
	 * (!protection).
	 */

	if (address >= VMALLOC_START &&
	    !protection &&
	    !user_mode(regs))
		goto vmalloc_fault;

	/* When stack execution is not allowed, we store the signal
	 * trampolines in the reserved cris_signal_return_page.
	 * Handle this in exactly the same way as a vmalloc fault (we
	 * know that the mapping is there and valid, so there is no
	 * need to call handle_mm_fault).
	 */
	if (cris_signal_return_page &&
	    address == cris_signal_return_page &&
	    !protection && user_mode(regs))
		goto vmalloc_fault;

	/* we can and should enable interrupts at this point */
	local_irq_enable();

	mm = tsk->mm;
	info.si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt, in an atomic operation, or have no
	 * user context, we must not take the fault.
	 */

	if (in_atomic() || !mm)
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
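
	/*
	 * The retry label is taken again if handle_mm_fault() returned
	 * VM_FAULT_RETRY; on the second attempt FAULT_FLAG_TRIED is set
	 * and the fault handler waits instead of asking for another
	 * retry.
	 */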
retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (user_mode(regs)) {
		/*
		 * Accessing the stack below usp is always a bug.
		 * We get page-aligned addresses, so we can only check
		 * if we're within a page from usp, but that might be
		 * enough to catch brutal errors at least.
		 */
		if (address + PAGE_SIZE < rdusp())
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */

 good_area:
	info.si_code = SEGV_ACCERR;

	/* first do some preliminary protection checks */

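	/*
	 * writeaccess encodes the access type: 2 means instruction
	 * fetch, 1 means write and 0 means read.
	 */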
	if (writeaccess == 2) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (writeaccess == 1) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */

	fault = handle_mm_fault(mm, vma, address, flags);

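	/*
	 * If the fault is to be retried but a fatal signal is pending,
	 * just return; mmap_sem has already been released by
	 * __lock_page_or_retry() in mm/filemap.c.
	 */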
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry()
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's a kernel or user fault first.
	 */

 bad_area:
	up_read(&mm->mmap_sem);

 bad_area_nosemaphore:
	DPG(show_registers(regs));

	/* User mode accesses just cause a SIGSEGV */

	if (user_mode(regs)) {
#ifdef CONFIG_NO_SEGFAULT_TERMINATION
		DECLARE_WAIT_QUEUE_HEAD(wq);
#endif
		printk(KERN_NOTICE "%s (pid %d) segfaults for page "
			"address %08lx at pc %08lx\n",
			tsk->comm, tsk->pid,
			address, instruction_pointer(regs));

		/* With DPG enabled, DPG(if (0)) expands to "if (0)" and
		 * skips this dump, since the registers were already shown
		 * at bad_area_nosemaphore above; with DPG disabled it
		 * expands to nothing and show_registers() always runs.  */
		DPG(if (0))
			show_registers(regs);

#ifdef CONFIG_NO_SEGFAULT_TERMINATION
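		/* Instead of delivering SIGSEGV, put the task to sleep
		 * on a wait queue whose condition never becomes true.  */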
		wait_event_interruptible(wq, 0 == 1);
#else
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);
#endif
		return;
	}

 no_context:

	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception points in the source
	 *  where it accesses user memory. When it faults at one
	 *  of those points, we find it in a table and jump to
	 *  some fixup code that loads an appropriate error
	 *  code.)
	 */

	if (find_fixup_code(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */

	if (!oops_in_progress) {
		oops_in_progress = 1;
		if (address < PAGE_SIZE)
			printk(KERN_ALERT "Unable to handle kernel NULL "
				"pointer dereference\n");
		else
			printk(KERN_ALERT "Unable to handle kernel access"
				" at virtual address %08lx\n", address);

		die_if_kernel("Oops", regs, (writeaccess << 1) | protection);
		oops_in_progress = 0;
	}

	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */

 out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
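	/* Hand the situation to the generic VM out-of-memory handler,
	 * which may invoke the OOM killer.  */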
	pagefault_out_of_memory();
	return;

 do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Use current_pgd instead of tsk->active_mm->pgd
		 * since the latter might be unavailable if this
		 * code is executed in an inopportunely timed
		 * interrupt (like inside schedule(), between
		 * switch_mm and switch_to...).
		 */

		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *)per_cpu(current_pgd, smp_processor_id()) + offset;
		pgd_k = init_mm.pgd + offset;

		/* Since we're two-level, we don't need to do both
		 * set_pgd and set_pmd (they do the same thing). If
		 * we go three-level at some point, do the right thing
		 * with pgd_present and set_pgd here.
		 *
		 * Also, since the vmalloc area is global, we don't
		 * need to copy individual PTEs; it is enough to
		 * copy the pgd pointer into the pte page of the
		 * root task. If that is there, we'll find our pte if
		 * it exists.
		 */

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);

		if (!pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		set_pmd(pmd, *pmd_k);

		/* Make sure the actual PTE exists as well, to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, the fault will just
		 * silently loop forever.
		 */

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		return;
	}
}

/* Find fixup code. */
int
find_fixup_code(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;
	/* Mask off bit 0, which marks a fault in a delay slot (v32),
	 * so that we look up the actual instruction address. */
	unsigned long ip = (instruction_pointer(regs) & ~0x1);

	fixup = search_exception_tables(ip);
	if (fixup != NULL) {
		/* Adjust the instruction pointer in the stackframe. */
		instruction_pointer(regs) = fixup->fixup;
		arch_fixup(regs);
		return 1;
	}

	return 0;
}