1/*
2 *  PowerPC version
3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
5 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
6 *  Adapted for Power Macintosh by Paul Mackerras.
7 *  Low-level exception handlers and MMU support
8 *  rewritten by Paul Mackerras.
9 *    Copyright (C) 1996 Paul Mackerras.
10 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 *  This file contains the system call entry code, context switch
13 *  code, and exception/interrupt return code for PowerPC.
14 *
15 *  This program is free software; you can redistribute it and/or
16 *  modify it under the terms of the GNU General Public License
17 *  as published by the Free Software Foundation; either version
18 *  2 of the License, or (at your option) any later version.
19 */
20
21#include <linux/errno.h>
22#include <linux/err.h>
23#include <asm/unistd.h>
24#include <asm/processor.h>
25#include <asm/page.h>
26#include <asm/mmu.h>
27#include <asm/thread_info.h>
28#include <asm/ppc_asm.h>
29#include <asm/asm-offsets.h>
30#include <asm/cputable.h>
31#include <asm/firmware.h>
32#include <asm/bug.h>
33#include <asm/ptrace.h>
34#include <asm/irqflags.h>
35#include <asm/ftrace.h>
36#include <asm/hw_irq.h>
37#include <asm/context_tracking.h>
38#include <asm/tm.h>
39
40/*
41 * System calls.
42 */
43	.section	".toc","aw"
44SYS_CALL_TABLE:
45	.tc sys_call_table[TC],sys_call_table
46
47/* This value is used to mark exception frames on the stack. */
48exception_marker:
49	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
50
51	.section	".text"
52	.align 7
53
54	.globl system_call_common
55system_call_common:
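	/*
	 * On entry, as set up by the exception prolog: r0 holds the syscall
	 * number, r3-r8 the syscall arguments, r9 the caller's r13, r11 the
	 * caller's NIP (from SRR0), r12 the caller's MSR (from SRR1), and
	 * r13 the PACA pointer.
	 */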
56#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
57BEGIN_FTR_SECTION
58	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
59	bne	tabort_syscall
60END_FTR_SECTION_IFSET(CPU_FTR_TM)
61#endif
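	/*
	 * If we came from user mode (MSR_PR set in r12), switch to the
	 * kernel stack saved in PACAKSAVE; otherwise carve a new frame
	 * out of the current kernel stack.
	 */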
62	andi.	r10,r12,MSR_PR
63	mr	r10,r1
64	addi	r1,r1,-INT_FRAME_SIZE
65	beq-	1f
66	ld	r1,PACAKSAVE(r13)
671:	std	r10,0(r1)
68	std	r11,_NIP(r1)
69	std	r12,_MSR(r1)
70	std	r0,GPR0(r1)
71	std	r10,GPR1(r1)
72	beq	2f			/* if from kernel mode */
73	ACCOUNT_CPU_USER_ENTRY(r10, r11)
742:	std	r2,GPR2(r1)
75	std	r3,GPR3(r1)
76	mfcr	r2
77	std	r4,GPR4(r1)
78	std	r5,GPR5(r1)
79	std	r6,GPR6(r1)
80	std	r7,GPR7(r1)
81	std	r8,GPR8(r1)
82	li	r11,0
83	std	r11,GPR9(r1)
84	std	r11,GPR10(r1)
85	std	r11,GPR11(r1)
86	std	r11,GPR12(r1)
87	std	r11,_XER(r1)
88	std	r11,_CTR(r1)
89	std	r9,GPR13(r1)
90	mflr	r10
91	/*
92	 * This clears CR0.SO (bit 28), which is the error indication on
93	 * return from this system call.
94	 */
95	rldimi	r2,r11,28,(63-28)
96	li	r11,0xc01
97	std	r10,_LINK(r1)
98	std	r11,_TRAP(r1)
99	std	r3,ORIG_GPR3(r1)
100	std	r2,_CCR(r1)
101	ld	r2,PACATOC(r13)
102	addi	r9,r1,STACK_FRAME_OVERHEAD
103	ld	r11,exception_marker@toc(r2)
104	std	r11,-16(r9)		/* "regshere" marker */
105#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
106BEGIN_FW_FTR_SECTION
107	beq	33f
108	/* if from user, see if there are any DTL entries to process */
109	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
110	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
111	addi	r10,r10,LPPACA_DTLIDX
112	LDX_BE	r10,0,r10		/* get log write index */
113	cmpd	cr1,r11,r10
114	beq+	cr1,33f
115	bl	accumulate_stolen_time
116	REST_GPR(0,r1)
117	REST_4GPRS(3,r1)
118	REST_2GPRS(7,r1)
119	addi	r9,r1,STACK_FRAME_OVERHEAD
12033:
121END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
122#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */
123
	/*
	 * A syscall should always be called with interrupts enabled
	 * so we just unconditionally hard-enable here. When some kind
	 * of irq tracing is used, we additionally check that this
	 * condition is correct.
	 */
130#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
131	lbz	r10,PACASOFTIRQEN(r13)
132	xori	r10,r10,1
1331:	tdnei	r10,0
134	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
135#endif
136
137#ifdef CONFIG_PPC_BOOK3E
138	wrteei	1
139#else
140	ld	r11,PACAKMSR(r13)
141	ori	r11,r11,MSR_EE
142	mtmsrd	r11,1
143#endif /* CONFIG_PPC_BOOK3E */
144
145	/* We do need to set SOFTE in the stack frame or the return
146	 * from interrupt will be painful
147	 */
148	li	r10,1
149	std	r10,SOFTE(r1)
150
151	CURRENT_THREAD_INFO(r11, r1)
152	ld	r10,TI_FLAGS(r11)
153	andi.	r11,r10,_TIF_SYSCALL_DOTRACE
154	bne	syscall_dotrace		/* does not return */
155	cmpldi	0,r0,NR_syscalls
156	bge-	syscall_enosys
157
158system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to the 32-bit or default sys_call_table here,
 * based on the caller's run-mode / personality.
 */
163	ld	r11,SYS_CALL_TABLE@toc(2)
164	andi.	r10,r10,_TIF_32BIT
165	beq	15f
166	addi	r11,r11,8	/* use 32-bit syscall entries */
167	clrldi	r3,r3,32
168	clrldi	r4,r4,32
169	clrldi	r5,r5,32
170	clrldi	r6,r6,32
171	clrldi	r7,r7,32
172	clrldi	r8,r8,32
17315:
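	/*
	 * Each sys_call_table slot is 16 bytes, holding the 64-bit handler
	 * followed by its 32-bit compat handler (hence the +8 above), so
	 * index the table by syscall number times 16.
	 */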
174	slwi	r0,r0,4
175	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
176	mtctr   r12
177	bctrl			/* Call handler */
178
179.Lsyscall_exit:
180	std	r3,RESULT(r1)
181	CURRENT_THREAD_INFO(r12, r1)
182
183	ld	r8,_MSR(r1)
184#ifdef CONFIG_PPC_BOOK3S
185	/* No MSR:RI on BookE */
186	andi.	r10,r8,MSR_RI
187	beq-	unrecov_restore
188#endif
189	/*
190	 * Disable interrupts so current_thread_info()->flags can't change,
191	 * and so that we don't get interrupted after loading SRR0/1.
192	 */
193#ifdef CONFIG_PPC_BOOK3E
194	wrteei	0
195#else
196	ld	r10,PACAKMSR(r13)
197	/*
198	 * For performance reasons we clear RI the same time that we
199	 * clear EE. We only need to clear RI just before we restore r13
200	 * below, but batching it with EE saves us one expensive mtmsrd call.
201	 * We have to be careful to restore RI if we branch anywhere from
202	 * here (eg syscall_exit_work).
203	 */
204	li	r9,MSR_RI
205	andc	r11,r10,r9
206	mtmsrd	r11,1
207#endif /* CONFIG_PPC_BOOK3E */
208
209	ld	r9,TI_FLAGS(r12)
210	li	r11,-MAX_ERRNO
211	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
212	bne-	syscall_exit_work
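	/*
	 * The unsigned compare below treats return values in the range
	 * [-MAX_ERRNO, -1] as errors: syscall_error sets CR0.SO in the
	 * saved CR image and negates the value before returning it.
	 */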
213	cmpld	r3,r11
214	ld	r5,_CCR(r1)
215	bge-	syscall_error
216.Lsyscall_error_cont:
217	ld	r7,_NIP(r1)
218BEGIN_FTR_SECTION
219	stdcx.	r0,0,r1			/* to clear the reservation */
220END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
221	andi.	r6,r8,MSR_PR
222	ld	r4,_LINK(r1)
223
224	beq-	1f
225	ACCOUNT_CPU_USER_EXIT(r11, r12)
226	HMT_MEDIUM_LOW_HAS_PPR
227	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
2281:	ld	r2,GPR2(r1)
229	ld	r1,GPR1(r1)
230	mtlr	r4
231	mtcr	r5
232	mtspr	SPRN_SRR0,r7
233	mtspr	SPRN_SRR1,r8
234	RFI
235	b	.	/* prevent speculative execution */
236
237syscall_error:
238	oris	r5,r5,0x1000	/* Set SO bit in CR */
239	neg	r3,r3
240	std	r5,_CCR(r1)
241	b	.Lsyscall_error_cont
242
243/* Traced system call support */
244syscall_dotrace:
245	bl	save_nvgprs
246	addi	r3,r1,STACK_FRAME_OVERHEAD
247	bl	do_syscall_trace_enter
248
	/*
	 * We use the return value of do_syscall_trace_enter() as the syscall
	 * number. If the syscall was rejected for any reason,
	 * do_syscall_trace_enter() returns an invalid syscall number and the
	 * test below against NR_syscalls will fail.
	 */
255	mr	r0,r3
256
257	/* Restore argument registers just clobbered and/or possibly changed. */
258	ld	r3,GPR3(r1)
259	ld	r4,GPR4(r1)
260	ld	r5,GPR5(r1)
261	ld	r6,GPR6(r1)
262	ld	r7,GPR7(r1)
263	ld	r8,GPR8(r1)
264
265	/* Repopulate r9 and r10 for the system_call path */
266	addi	r9,r1,STACK_FRAME_OVERHEAD
267	CURRENT_THREAD_INFO(r10, r1)
268	ld	r10,TI_FLAGS(r10)
269
270	cmpldi	r0,NR_syscalls
271	blt+	system_call
272
273	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
274	b	.Lsyscall_exit
275
276
277syscall_enosys:
278	li	r3,-ENOSYS
279	b	.Lsyscall_exit
280
281syscall_exit_work:
282#ifdef CONFIG_PPC_BOOK3S
283	mtmsrd	r10,1		/* Restore RI */
284#endif
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 * If TIF_NOERROR is set, just save r3 as it is. */
287
288	andi.	r0,r9,_TIF_RESTOREALL
289	beq+	0f
290	REST_NVGPRS(r1)
291	b	2f
2920:	cmpld	r3,r11		/* r11 is -MAX_ERRNO */
293	blt+	1f
294	andi.	r0,r9,_TIF_NOERROR
295	bne-	1f
296	ld	r5,_CCR(r1)
297	neg	r3,r3
298	oris	r5,r5,0x1000	/* Set SO bit in CR */
299	std	r5,_CCR(r1)
3001:	std	r3,GPR3(r1)
3012:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
302	beq	4f
303
304	/* Clear per-syscall TIF flags if any are set.  */
305
306	li	r11,_TIF_PERSYSCALL_MASK
307	addi	r12,r12,TI_FLAGS
3083:	ldarx	r10,0,r12
309	andc	r10,r10,r11
310	stdcx.	r10,0,r12
311	bne-	3b
312	subi	r12,r12,TI_FLAGS
313
3144:	/* Anything else left to do? */
315	SET_DEFAULT_THREAD_PPR(r3, r10)		/* Set thread.ppr = 3 */
316	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
317	beq	ret_from_except_lite
318
319	/* Re-enable interrupts */
320#ifdef CONFIG_PPC_BOOK3E
321	wrteei	1
322#else
323	ld	r10,PACAKMSR(r13)
324	ori	r10,r10,MSR_EE
325	mtmsrd	r10,1
326#endif /* CONFIG_PPC_BOOK3E */
327
328	bl	save_nvgprs
329	addi	r3,r1,STACK_FRAME_OVERHEAD
330	bl	do_syscall_trace_leave
331	b	ret_from_except
332
333#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
334tabort_syscall:
335	/* Firstly we need to enable TM in the kernel */
336	mfmsr	r10
337	li	r13, 1
338	rldimi	r10, r13, MSR_TM_LG, 63-MSR_TM_LG
339	mtmsrd	r10, 0
340
341	/* tabort, this dooms the transaction, nothing else */
342	li	r13, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
343	TABORT(R13)
344
345	/*
346	 * Return directly to userspace. We have corrupted user register state,
347	 * but userspace will never see that register state. Execution will
348	 * resume after the tbegin of the aborted transaction with the
349	 * checkpointed register state.
350	 */
351	li	r13, MSR_RI
352	andc	r10, r10, r13
353	mtmsrd	r10, 1
354	mtspr	SPRN_SRR0, r11
355	mtspr	SPRN_SRR1, r12
356
357	rfid
358	b	.	/* prevent speculative execution */
359#endif
360
361/* Save non-volatile GPRs, if not already saved. */
362_GLOBAL(save_nvgprs)
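	/*
	 * The low bit of _TRAP is set while the non-volatile GPRs have not
	 * been saved; if it is already clear they were saved earlier, so
	 * return immediately. Clear it after saving them below.
	 */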
363	ld	r11,_TRAP(r1)
364	andi.	r0,r11,1
365	beqlr-
366	SAVE_NVGPRS(r1)
367	clrrdi	r0,r11,1
368	std	r0,_TRAP(r1)
369	blr
370
371
372/*
373 * The sigsuspend and rt_sigsuspend system calls can call do_signal
374 * and thus put the process into the stopped state where we might
375 * want to examine its user state with ptrace.  Therefore we need
376 * to save all the nonvolatile registers (r14 - r31) before calling
377 * the C code.  Similarly, fork, vfork and clone need the full
378 * register state on the stack so that it can be copied to the child.
379 */
380
381_GLOBAL(ppc_fork)
382	bl	save_nvgprs
383	bl	sys_fork
384	b	.Lsyscall_exit
385
386_GLOBAL(ppc_vfork)
387	bl	save_nvgprs
388	bl	sys_vfork
389	b	.Lsyscall_exit
390
391_GLOBAL(ppc_clone)
392	bl	save_nvgprs
393	bl	sys_clone
394	b	.Lsyscall_exit
395
396_GLOBAL(ppc32_swapcontext)
397	bl	save_nvgprs
398	bl	compat_sys_swapcontext
399	b	.Lsyscall_exit
400
401_GLOBAL(ppc64_swapcontext)
402	bl	save_nvgprs
403	bl	sys_swapcontext
404	b	.Lsyscall_exit
405
406_GLOBAL(ppc_switch_endian)
407	bl	save_nvgprs
408	bl	sys_switch_endian
409	b	.Lsyscall_exit
410
411_GLOBAL(ret_from_fork)
412	bl	schedule_tail
413	REST_NVGPRS(r1)
414	li	r3,0
415	b	.Lsyscall_exit
416
417_GLOBAL(ret_from_kernel_thread)
418	bl	schedule_tail
419	REST_NVGPRS(r1)
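	/*
	 * copy_thread() put the thread function in r14 and its argument in
	 * r15; the ELFv2 ABI also expects the entry point in r12 so the
	 * callee can set up its TOC.
	 */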
420	mtlr	r14
421	mr	r3,r15
422#if defined(_CALL_ELF) && _CALL_ELF == 2
423	mr	r12,r14
424#endif
425	blrl
426	li	r3,0
427	b	.Lsyscall_exit
428
429/*
430 * This routine switches between two different tasks.  The process
431 * state of one is saved on its kernel stack.  Then the state
432 * of the other is restored from its kernel stack.  The memory
433 * management hardware is updated to the second process's state.
434 * Finally, we can return to the second process, via ret_from_except.
435 * On entry, r3 points to the THREAD for the current task, r4
436 * points to the THREAD for the new task.
437 *
438 * Note: there are two ways to get to the "going out" portion
439 * of this code; either by coming in via the entry (_switch)
440 * or via "fork" which must set up an environment equivalent
441 * to the "_switch" path.  If you change this you'll have to change
442 * the fork code also.
443 *
444 * The code which creates the new task context is in 'copy_thread'
445 * in arch/powerpc/kernel/process.c
446 */
447	.align	7
448_GLOBAL(_switch)
449	mflr	r0
450	std	r0,16(r1)
451	stdu	r1,-SWITCH_FRAME_SIZE(r1)
452	/* r3-r13 are caller saved -- Cort */
453	SAVE_8GPRS(14, r1)
454	SAVE_10GPRS(22, r1)
455	mflr	r20		/* Return to switch caller */
456	mfmsr	r22
457	li	r0, MSR_FP
458#ifdef CONFIG_VSX
459BEGIN_FTR_SECTION
460	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
461END_FTR_SECTION_IFSET(CPU_FTR_VSX)
462#endif /* CONFIG_VSX */
463#ifdef CONFIG_ALTIVEC
464BEGIN_FTR_SECTION
465	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
466	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
467	std	r24,THREAD_VRSAVE(r3)
468END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
469#endif /* CONFIG_ALTIVEC */
470	and.	r0,r0,r22
471	beq+	1f
472	andc	r22,r22,r0
473	MTMSRD(r22)
474	isync
4751:	std	r20,_NIP(r1)
476	mfcr	r23
477	std	r23,_CCR(r1)
478	std	r1,KSP(r3)	/* Set old stack pointer */
479
480#ifdef CONFIG_PPC_BOOK3S_64
481BEGIN_FTR_SECTION
482	/* Event based branch registers */
483	mfspr	r0, SPRN_BESCR
484	std	r0, THREAD_BESCR(r3)
485	mfspr	r0, SPRN_EBBHR
486	std	r0, THREAD_EBBHR(r3)
487	mfspr	r0, SPRN_EBBRR
488	std	r0, THREAD_EBBRR(r3)
489END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
490#endif
491
492#ifdef CONFIG_SMP
493	/* We need a sync somewhere here to make sure that if the
494	 * previous task gets rescheduled on another CPU, it sees all
495	 * stores it has performed on this one.
496	 */
497	sync
498#endif /* CONFIG_SMP */
499
500	/*
501	 * If we optimise away the clear of the reservation in system
502	 * calls because we know the CPU tracks the address of the
503	 * reservation, then we need to clear it here to cover the
504	 * case that the kernel context switch path has no larx
505	 * instructions.
506	 */
507BEGIN_FTR_SECTION
508	ldarx	r6,0,r1
509END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
510
511#ifdef CONFIG_PPC_BOOK3S
/* Cancel all explicit user streams as they will have no use after context
 * switch and will stop the HW from creating streams itself
 */
515	DCBT_STOP_ALL_STREAM_IDS(r6)
516#endif
517
518	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
519	std	r6,PACACURRENT(r13)	/* Set new 'current' */
520
521	ld	r8,KSP(r4)	/* new stack pointer */
522#ifdef CONFIG_PPC_BOOK3S
523BEGIN_FTR_SECTION
524	clrrdi	r6,r8,28	/* get its ESID */
525	clrrdi	r9,r1,28	/* get current sp ESID */
526FTR_SECTION_ELSE
527	clrrdi	r6,r8,40	/* get its 1T ESID */
528	clrrdi	r9,r1,40	/* get current sp 1T ESID */
529ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
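	/*
	 * No SLB update is needed if the new stack is in the first (bolted)
	 * segment of the kernel linear mapping, or if it shares a segment
	 * with the current stack (its SLB entry is already present).
	 */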
530	clrldi.	r0,r6,2		/* is new ESID c00000000? */
531	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
532	cror	eq,4*cr1+eq,eq
533	beq	2f		/* if yes, don't slbie it */
534
535	/* Bolt in the new stack SLB entry */
536	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
537	oris	r0,r6,(SLB_ESID_V)@h
538	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
539BEGIN_FTR_SECTION
540	li	r9,MMU_SEGSIZE_1T	/* insert B field */
541	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
542	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
543END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
544
545	/* Update the last bolted SLB.  No write barriers are needed
546	 * here, provided we only update the current CPU's SLB shadow
547	 * buffer.
548	 */
549	ld	r9,PACA_SLBSHADOWPTR(r13)
550	li	r12,0
551	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
552	li	r12,SLBSHADOW_STACKVSID
553	STDX_BE	r7,r12,r9			/* Save VSID */
554	li	r12,SLBSHADOW_STACKESID
555	STDX_BE	r0,r12,r9			/* Save ESID */
556
557	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
558	 * we have 1TB segments, the only CPUs known to have the errata
559	 * only support less than 1TB of system memory and we'll never
560	 * actually hit this code path.
561	 */
562
563	slbie	r6
564	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
565	slbmte	r7,r0
566	isync
5672:
#endif /* CONFIG_PPC_BOOK3S */
569
570	CURRENT_THREAD_INFO(r7, r8)  /* base of new stack */
571	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
572	   because we don't need to leave the 288-byte ABI gap at the
573	   top of the kernel stack. */
574	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
575
576	mr	r1,r8		/* start using new stack pointer */
577	std	r7,PACAKSAVE(r13)
578
579#ifdef CONFIG_PPC_BOOK3S_64
580BEGIN_FTR_SECTION
581	/* Event based branch registers */
582	ld	r0, THREAD_BESCR(r4)
583	mtspr	SPRN_BESCR, r0
584	ld	r0, THREAD_EBBHR(r4)
585	mtspr	SPRN_EBBHR, r0
586	ld	r0, THREAD_EBBRR(r4)
587	mtspr	SPRN_EBBRR, r0
588
589	ld	r0,THREAD_TAR(r4)
590	mtspr	SPRN_TAR,r0
591END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
592#endif
593
594#ifdef CONFIG_ALTIVEC
595BEGIN_FTR_SECTION
596	ld	r0,THREAD_VRSAVE(r4)
597	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
598END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
599#endif /* CONFIG_ALTIVEC */
600#ifdef CONFIG_PPC64
601BEGIN_FTR_SECTION
602	lwz	r6,THREAD_DSCR_INHERIT(r4)
603	ld	r0,THREAD_DSCR(r4)
604	cmpwi	r6,0
605	bne	1f
606	ld	r0,PACA_DSCR_DEFAULT(r13)
6071:
608BEGIN_FTR_SECTION_NESTED(70)
609	mfspr	r8, SPRN_FSCR
610	rldimi	r8, r6, FSCR_DSCR_LG, (63 - FSCR_DSCR_LG)
611	mtspr	SPRN_FSCR, r8
612END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
613	cmpd	r0,r25
614	beq	2f
615	mtspr	SPRN_DSCR,r0
6162:
617END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
618#endif
619
620	ld	r6,_CCR(r1)
621	mtcrf	0xFF,r6
622
623	/* r3-r13 are destroyed -- Cort */
624	REST_8GPRS(14, r1)
625	REST_10GPRS(22, r1)
626
627	/* convert old thread to its task_struct for return value */
628	addi	r3,r3,-THREAD
629	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
630	mtlr	r7
631	addi	r1,r1,SWITCH_FRAME_SIZE
632	blr
633
634	.align	7
635_GLOBAL(ret_from_except)
636	ld	r11,_TRAP(r1)
637	andi.	r0,r11,1
638	bne	ret_from_except_lite
639	REST_NVGPRS(r1)
640
641_GLOBAL(ret_from_except_lite)
642	/*
643	 * Disable interrupts so that current_thread_info()->flags
644	 * can't change between when we test it and when we return
645	 * from the interrupt.
646	 */
647#ifdef CONFIG_PPC_BOOK3E
648	wrteei	0
649#else
650	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
651	mtmsrd	r10,1		  /* Update machine state */
652#endif /* CONFIG_PPC_BOOK3E */
653
654	CURRENT_THREAD_INFO(r9, r1)
655	ld	r3,_MSR(r1)
656#ifdef CONFIG_PPC_BOOK3E
657	ld	r10,PACACURRENT(r13)
658#endif /* CONFIG_PPC_BOOK3E */
659	ld	r4,TI_FLAGS(r9)
660	andi.	r3,r3,MSR_PR
661	beq	resume_kernel
662#ifdef CONFIG_PPC_BOOK3E
663	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
664#endif /* CONFIG_PPC_BOOK3E */
665
666	/* Check current_thread_info()->flags */
667	andi.	r0,r4,_TIF_USER_WORK_MASK
668#ifdef CONFIG_PPC_BOOK3E
669	bne	1f
670	/*
671	 * Check to see if the dbcr0 register is set up to debug.
672	 * Use the internal debug mode bit to do this.
673	 */
674	andis.	r0,r3,DBCR0_IDM@h
675	beq	restore
676	mfmsr	r0
677	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
678	mtmsr	r0
679	mtspr	SPRN_DBCR0,r3
680	li	r10, -1
681	mtspr	SPRN_DBSR,r10
682	b	restore
683#else
684	beq	restore
685#endif
6861:	andi.	r0,r4,_TIF_NEED_RESCHED
687	beq	2f
688	bl	restore_interrupts
689	SCHEDULE_USER
690	b	ret_from_except_lite
6912:
692#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
693	andi.	r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
694	bne	3f		/* only restore TM if nothing else to do */
695	addi	r3,r1,STACK_FRAME_OVERHEAD
696	bl	restore_tm_state
697	b	restore
6983:
699#endif
700	bl	save_nvgprs
701	/*
702	 * Use a non volatile GPR to save and restore our thread_info flags
703	 * across the call to restore_interrupts.
704	 */
705	mr	r30,r4
706	bl	restore_interrupts
707	mr	r4,r30
708	addi	r3,r1,STACK_FRAME_OVERHEAD
709	bl	do_notify_resume
710	b	ret_from_except
711
712resume_kernel:
713	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
714	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
715	beq+	1f
716
717	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
718
719	lwz	r3,GPR1(r1)
720	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
721	mr	r4,r1			/* src:  current exception frame */
722	mr	r1,r3			/* Reroute the trampoline frame to r1 */
723
724	/* Copy from the original to the trampoline. */
725	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
726	li	r6,0			/* start offset: 0 */
727	mtctr	r5
7282:	ldx	r0,r6,r4
729	stdx	r0,r6,r3
730	addi	r6,r6,8
731	bdnz	2b
732
	/* Do the real store operation to complete the stwu/stdu */
734	lwz	r5,GPR1(r1)
735	std	r8,0(r5)
736
737	/* Clear _TIF_EMULATE_STACK_STORE flag */
738	lis	r11,_TIF_EMULATE_STACK_STORE@h
739	addi	r5,r9,TI_FLAGS
7400:	ldarx	r4,0,r5
741	andc	r4,r4,r11
742	stdcx.	r4,0,r5
743	bne-	0b
7441:
745
746#ifdef CONFIG_PREEMPT
747	/* Check if we need to preempt */
748	andi.	r0,r4,_TIF_NEED_RESCHED
749	beq+	restore
750	/* Check that preempt_count() == 0 and interrupts are enabled */
751	lwz	r8,TI_PREEMPT(r9)
752	cmpwi	cr1,r8,0
753	ld	r0,SOFTE(r1)
754	cmpdi	r0,0
755	crandc	eq,cr1*4+eq,eq
756	bne	restore
757
758	/*
759	 * Here we are preempting the current task. We want to make
760	 * sure we are soft-disabled first and reconcile irq state.
761	 */
762	RECONCILE_IRQ_STATE(r3,r4)
7631:	bl	preempt_schedule_irq
764
765	/* Re-test flags and eventually loop */
766	CURRENT_THREAD_INFO(r9, r1)
767	ld	r4,TI_FLAGS(r9)
768	andi.	r0,r4,_TIF_NEED_RESCHED
769	bne	1b
770
	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * hard-enable interrupts, but we really should disable them again
	 * before we return from the interrupt, so that we don't get
	 * interrupted after loading SRR0/1.
	 */
777#ifdef CONFIG_PPC_BOOK3E
778	wrteei	0
779#else
780	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
781	mtmsrd	r10,1		  /* Update machine state */
782#endif /* CONFIG_PPC_BOOK3E */
783#endif /* CONFIG_PREEMPT */
784
785	.globl	fast_exc_return_irq
786fast_exc_return_irq:
787restore:
788	/*
789	 * This is the main kernel exit path. First we check if we
790	 * are about to re-enable interrupts
791	 */
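	/*
	 * SOFTE(r1) holds the soft-enable state of the context we are
	 * returning to, while PACASOFTIRQEN holds the current one; the
	 * two are reconciled before the final return.
	 */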
792	ld	r5,SOFTE(r1)
793	lbz	r6,PACASOFTIRQEN(r13)
794	cmpwi	cr0,r5,0
795	beq	restore_irq_off
796
	/* We are enabling; were we already enabled? Yes, just return */
798	cmpwi	cr0,r6,1
799	beq	cr0,do_restore
800
801	/*
802	 * We are about to soft-enable interrupts (we are hard disabled
803	 * at this point). We check if there's anything that needs to
804	 * be replayed first.
805	 */
806	lbz	r0,PACAIRQHAPPENED(r13)
807	cmpwi	cr0,r0,0
808	bne-	restore_check_irq_replay
809
	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move on. We will hard-enable as a side
	 * effect of rfi.
	 */
815restore_no_replay:
816	TRACE_ENABLE_INTS
817	li	r0,1
818	stb	r0,PACASOFTIRQEN(r13);
819
820	/*
821	 * Final return path. BookE is handled in a different file
822	 */
823do_restore:
824#ifdef CONFIG_PPC_BOOK3E
825	b	exception_return_book3e
826#else
827	/*
828	 * Clear the reservation. If we know the CPU tracks the address of
829	 * the reservation then we can potentially save some cycles and use
830	 * a larx. On POWER6 and POWER7 this is significantly faster.
831	 */
832BEGIN_FTR_SECTION
833	stdcx.	r0,0,r1		/* to clear the reservation */
834FTR_SECTION_ELSE
835	ldarx	r4,0,r1
836ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
837
	/*
	 * Some code paths, such as load_up_fpu or altivec, return directly
	 * here. They run entirely hard disabled and do not alter the
	 * interrupt state. They also don't use lwarx/stwcx. and thus
	 * are known not to leave dangling reservations.
	 */
844	.globl	fast_exception_return
845fast_exception_return:
846	ld	r3,_MSR(r1)
847	ld	r4,_CTR(r1)
848	ld	r0,_LINK(r1)
849	mtctr	r4
850	mtlr	r0
851	ld	r4,_XER(r1)
852	mtspr	SPRN_XER,r4
853
854	REST_8GPRS(5, r1)
855
856	andi.	r0,r3,MSR_RI
857	beq-	unrecov_restore
858
859	/* Load PPR from thread struct before we clear MSR:RI */
860BEGIN_FTR_SECTION
861	ld	r2,PACACURRENT(r13)
862	ld	r2,TASKTHREADPPR(r2)
863END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
864
865	/*
866	 * Clear RI before restoring r13.  If we are returning to
867	 * userspace and we take an exception after restoring r13,
868	 * we end up corrupting the userspace r13 value.
869	 */
870	ld	r4,PACAKMSR(r13) /* Get kernel MSR without EE */
871	andc	r4,r4,r0	 /* r0 contains MSR_RI here */
872	mtmsrd	r4,1
873
874#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
875	/* TM debug */
876	std	r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
877#endif
	/*
	 * r13 is our per-cpu area; only restore it if we are returning to
	 * userspace, since the value stored in the stack frame may belong
	 * to another CPU.
	 */
883	andi.	r0,r3,MSR_PR
884	beq	1f
885BEGIN_FTR_SECTION
886	mtspr	SPRN_PPR,r2	/* Restore PPR */
887END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
888	ACCOUNT_CPU_USER_EXIT(r2, r4)
889	REST_GPR(13, r1)
8901:
891	mtspr	SPRN_SRR1,r3
892
893	ld	r2,_CCR(r1)
894	mtcrf	0xFF,r2
895	ld	r2,_NIP(r1)
896	mtspr	SPRN_SRR0,r2
897
898	ld	r0,GPR0(r1)
899	ld	r2,GPR2(r1)
900	ld	r3,GPR3(r1)
901	ld	r4,GPR4(r1)
902	ld	r1,GPR1(r1)
903
904	rfid
905	b	.	/* prevent speculative execution */
906
907#endif /* CONFIG_PPC_BOOK3E */
908
	/*
	 * We are returning to a context with interrupts soft disabled.
	 *
	 * However, we may also be about to hard-enable, so we need to
	 * make sure that in this case we also clear PACA_IRQ_HARD_DIS,
	 * or that bit can get out of sync and bad things will happen.
	 */
916restore_irq_off:
917	ld	r3,_MSR(r1)
918	lbz	r7,PACAIRQHAPPENED(r13)
919	andi.	r0,r3,MSR_EE
920	beq	1f
921	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
922	stb	r7,PACAIRQHAPPENED(r13)
9231:	li	r0,0
924	stb	r0,PACASOFTIRQEN(r13);
925	TRACE_DISABLE_INTS
926	b	do_restore
927
928	/*
929	 * Something did happen, check if a re-emit is needed
930	 * (this also clears paca->irq_happened)
931	 */
932restore_check_irq_replay:
933	/* XXX: We could implement a fast path here where we check
934	 * for irq_happened being just 0x01, in which case we can
935	 * clear it and return. That means that we would potentially
936	 * miss a decrementer having wrapped all the way around.
937	 *
938	 * Still, this might be useful for things like hash_page
939	 */
940	bl	__check_irq_replay
941	cmpwi	cr0,r3,0
942 	beq	restore_no_replay
943
944	/*
945	 * We need to re-emit an interrupt. We do so by re-using our
946	 * existing exception frame. We first change the trap value,
947	 * but we need to ensure we preserve the low nibble of it
948	 */
949	ld	r4,_TRAP(r1)
950	clrldi	r4,r4,60
951	or	r4,r4,r3
952	std	r4,_TRAP(r1)
953
	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	 */
958	cmpwi	cr0,r3,0x500
959	bne	1f
960	addi	r3,r1,STACK_FRAME_OVERHEAD;
961 	bl	do_IRQ
962	b	ret_from_except
9631:	cmpwi	cr0,r3,0xe60
964	bne	1f
965	addi	r3,r1,STACK_FRAME_OVERHEAD;
966	bl	handle_hmi_exception
967	b	ret_from_except
9681:	cmpwi	cr0,r3,0x900
969	bne	1f
970	addi	r3,r1,STACK_FRAME_OVERHEAD;
971	bl	timer_interrupt
972	b	ret_from_except
973#ifdef CONFIG_PPC_DOORBELL
9741:
975#ifdef CONFIG_PPC_BOOK3E
976	cmpwi	cr0,r3,0x280
977#else
978	BEGIN_FTR_SECTION
979		cmpwi	cr0,r3,0xe80
980	FTR_SECTION_ELSE
981		cmpwi	cr0,r3,0xa00
982	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
983#endif /* CONFIG_PPC_BOOK3E */
984	bne	1f
985	addi	r3,r1,STACK_FRAME_OVERHEAD;
986	bl	doorbell_exception
987	b	ret_from_except
988#endif /* CONFIG_PPC_DOORBELL */
9891:	b	ret_from_except /* What else to do here ? */
990
991unrecov_restore:
992	addi	r3,r1,STACK_FRAME_OVERHEAD
993	bl	unrecoverable_exception
994	b	unrecov_restore
995
996#ifdef CONFIG_PPC_RTAS
997/*
998 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
999 * called with the MMU off.
1000 *
1001 * In addition, we need to be in 32b mode, at least for now.
1002 *
1003 * Note: r3 is an input parameter to rtas, so don't trash it...
1004 */
1005_GLOBAL(enter_rtas)
1006	mflr	r0
1007	std	r0,16(r1)
1008        stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */
1009
1010	/* Because RTAS is running in 32b mode, it clobbers the high order half
1011	 * of all registers that it saves.  We therefore save those registers
1012	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
1013   	 */
1014	SAVE_GPR(2, r1)			/* Save the TOC */
1015	SAVE_GPR(13, r1)		/* Save paca */
1016	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
1017	SAVE_10GPRS(22, r1)		/* ditto */
1018
1019	mfcr	r4
1020	std	r4,_CCR(r1)
1021	mfctr	r5
1022	std	r5,_CTR(r1)
1023	mfspr	r6,SPRN_XER
1024	std	r6,_XER(r1)
1025	mfdar	r7
1026	std	r7,_DAR(r1)
1027	mfdsisr	r8
1028	std	r8,_DSISR(r1)
1029
1030	/* Temporary workaround to clear CR until RTAS can be modified to
1031	 * ignore all bits.
1032	 */
1033	li	r0,0
1034	mtcr	r0
1035
1036#ifdef CONFIG_BUG
1037	/* There is no way it is acceptable to get here with interrupts enabled,
1038	 * check it with the asm equivalent of WARN_ON
1039	 */
1040	lbz	r0,PACASOFTIRQEN(r13)
10411:	tdnei	r0,0
1042	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
1043#endif
1044
1045	/* Hard-disable interrupts */
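	/*
	 * The rldicl/rotldi pair clears MSR_EE without needing a scratch
	 * mask: EE is rotated up to the MSB, cleared by the rldicl mask,
	 * and the remaining bits are rotated back into place.
	 */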
1046	mfmsr	r6
1047	rldicl	r7,r6,48,1
1048	rotldi	r7,r7,16
1049	mtmsrd	r7,1
1050
1051	/* Unfortunately, the stack pointer and the MSR are also clobbered,
1052	 * so they are saved in the PACA which allows us to restore
1053	 * our original state after RTAS returns.
1054         */
1055	std	r1,PACAR1(r13)
1056        std	r6,PACASAVEDMSR(r13)
1057
1058	/* Setup our real return addr */
1059	LOAD_REG_ADDR(r4,rtas_return_loc)
1060	clrldi	r4,r4,2			/* convert to realmode address */
1061       	mtlr	r4
1062
1063	li	r0,0
1064	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
1065	andc	r0,r6,r0
1066
1067        li      r9,1
1068        rldicr  r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
1069	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
1070	andc	r6,r0,r9
1071	sync				/* disable interrupts so SRR0/1 */
1072	mtmsrd	r0			/* don't get trashed */
1073
1074	LOAD_REG_ADDR(r4, rtas)
1075	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
1076	ld	r4,RTASBASE(r4)		/* get the rtas->base value */
1077
1078	mtspr	SPRN_SRR0,r5
1079	mtspr	SPRN_SRR1,r6
1080	rfid
1081	b	.	/* prevent speculative execution */
1082
1083rtas_return_loc:
1084	FIXUP_ENDIAN
1085
1086	/* relocation is off at this point */
1087	GET_PACA(r4)
1088	clrldi	r4,r4,2			/* convert to realmode address */
1089
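	/*
	 * bcl sets LR to the address of the next instruction, giving a
	 * PC-relative anchor for loading &rtas_restore_regs while
	 * relocation is off.
	 */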
1090	bcl	20,31,$+4
10910:	mflr	r3
1092	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */
1093
1094	mfmsr   r6
1095	li	r0,MSR_RI
1096	andc	r6,r6,r0
1097	sync
1098	mtmsrd  r6
1099
1100        ld	r1,PACAR1(r4)           /* Restore our SP */
1101        ld	r4,PACASAVEDMSR(r4)     /* Restore our MSR */
1102
1103	mtspr	SPRN_SRR0,r3
1104	mtspr	SPRN_SRR1,r4
1105	rfid
1106	b	.	/* prevent speculative execution */
1107
1108	.align	3
11091:	.llong	rtas_restore_regs
1110
1111rtas_restore_regs:
1112	/* relocation is on at this point */
1113	REST_GPR(2, r1)			/* Restore the TOC */
1114	REST_GPR(13, r1)		/* Restore paca */
1115	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
1116	REST_10GPRS(22, r1)		/* ditto */
1117
1118	GET_PACA(r13)
1119
1120	ld	r4,_CCR(r1)
1121	mtcr	r4
1122	ld	r5,_CTR(r1)
1123	mtctr	r5
1124	ld	r6,_XER(r1)
1125	mtspr	SPRN_XER,r6
1126	ld	r7,_DAR(r1)
1127	mtdar	r7
1128	ld	r8,_DSISR(r1)
1129	mtdsisr	r8
1130
1131        addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
1132	ld	r0,16(r1)		/* get return address */
1133
1134	mtlr    r0
1135        blr				/* return to caller */
1136
1137#endif /* CONFIG_PPC_RTAS */
1138
1139_GLOBAL(enter_prom)
1140	mflr	r0
1141	std	r0,16(r1)
1142        stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */
1143
1144	/* Because PROM is running in 32b mode, it clobbers the high order half
1145	 * of all registers that it saves.  We therefore save those registers
1146	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
1147   	 */
1148	SAVE_GPR(2, r1)
1149	SAVE_GPR(13, r1)
1150	SAVE_8GPRS(14, r1)
1151	SAVE_10GPRS(22, r1)
1152	mfcr	r10
1153	mfmsr	r11
1154	std	r10,_CCR(r1)
1155	std	r11,_MSR(r1)
1156
1157	/* Put PROM address in SRR0 */
1158	mtsrr0	r4
1159
1160	/* Setup our trampoline return addr in LR */
1161	bcl	20,31,$+4
11620:	mflr	r4
1163	addi	r4,r4,(1f - 0b)
1164       	mtlr	r4
1165
	/* Prepare a 32-bit mode big endian MSR */
1168#ifdef CONFIG_PPC_BOOK3E
1169	rlwinm	r11,r11,0,1,31
1170	mtsrr1	r11
1171	rfi
1172#else /* CONFIG_PPC_BOOK3E */
1173	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
1174	andc	r11,r11,r12
1175	mtsrr1	r11
1176	rfid
1177#endif /* CONFIG_PPC_BOOK3E */
1178
11791:	/* Return from OF */
1180	FIXUP_ENDIAN
1181
	/* Just make sure that r1's top 32 bits didn't get
	 * corrupted by OF
	 */
1185	rldicl	r1,r1,0,32
1186
1187	/* Restore the MSR (back to 64 bits) */
1188	ld	r0,_MSR(r1)
1189	MTMSRD(r0)
1190        isync
1191
1192	/* Restore other registers */
1193	REST_GPR(2, r1)
1194	REST_GPR(13, r1)
1195	REST_8GPRS(14, r1)
1196	REST_10GPRS(22, r1)
1197	ld	r4,_CCR(r1)
1198	mtcr	r4
1199
1200        addi	r1,r1,PROM_FRAME_SIZE
1201	ld	r0,16(r1)
1202	mtlr    r0
1203        blr
1204
1205#ifdef CONFIG_FUNCTION_TRACER
1206#ifdef CONFIG_DYNAMIC_FTRACE
1207_GLOBAL(mcount)
1208_GLOBAL(_mcount)
1209	blr
1210
1211_GLOBAL_TOC(ftrace_caller)
1212	/* Taken from output of objdump from lib64/glibc */
1213	mflr	r3
1214	ld	r11, 0(r1)
1215	stdu	r1, -112(r1)
1216	std	r3, 128(r1)
1217	ld	r4, 16(r11)
1218	subi	r3, r3, MCOUNT_INSN_SIZE
1219.globl ftrace_call
1220ftrace_call:
1221	bl	ftrace_stub
1222	nop
1223#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1224.globl ftrace_graph_call
1225ftrace_graph_call:
1226	b	ftrace_graph_stub
1227_GLOBAL(ftrace_graph_stub)
1228#endif
1229	ld	r0, 128(r1)
1230	mtlr	r0
1231	addi	r1, r1, 112
1232_GLOBAL(ftrace_stub)
1233	blr
1234#else
1235_GLOBAL_TOC(_mcount)
1236	/* Taken from output of objdump from lib64/glibc */
1237	mflr	r3
1238	ld	r11, 0(r1)
1239	stdu	r1, -112(r1)
1240	std	r3, 128(r1)
1241	ld	r4, 16(r11)
1242
1243	subi	r3, r3, MCOUNT_INSN_SIZE
1244	LOAD_REG_ADDR(r5,ftrace_trace_function)
1245	ld	r5,0(r5)
1246	ld	r5,0(r5)
1247	mtctr	r5
1248	bctrl
1249	nop
1250
1251
1252#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1253	b	ftrace_graph_caller
1254#endif
1255	ld	r0, 128(r1)
1256	mtlr	r0
1257	addi	r1, r1, 112
1258_GLOBAL(ftrace_stub)
1259	blr
1260
1261#endif /* CONFIG_DYNAMIC_FTRACE */
1262
1263#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1264_GLOBAL(ftrace_graph_caller)
1265	/* load r4 with local address */
1266	ld	r4, 128(r1)
1267	subi	r4, r4, MCOUNT_INSN_SIZE
1268
	/* Grab the LR out of the caller's stack frame */
1270	ld	r11, 112(r1)
1271	ld	r3, 16(r11)
1272
1273	bl	prepare_ftrace_return
1274	nop
1275
	/*
	 * prepare_ftrace_return gives us the address we divert to.
	 * Change the LR in the caller's stack frame to this.
	 */
1280	ld	r11, 112(r1)
1281	std	r3, 16(r11)
1282
1283	ld	r0, 128(r1)
1284	mtlr	r0
1285	addi	r1, r1, 112
1286	blr
1287
1288_GLOBAL(return_to_handler)
1289	/* need to save return values */
1290	std	r4,  -32(r1)
1291	std	r3,  -24(r1)
1292	/* save TOC */
1293	std	r2,  -16(r1)
1294	std	r31, -8(r1)
1295	mr	r31, r1
1296	stdu	r1, -112(r1)
1297
1298	/*
1299	 * We might be called from a module.
1300	 * Switch to our TOC to run inside the core kernel.
1301	 */
1302	ld	r2, PACATOC(r13)
1303
1304	bl	ftrace_return_to_handler
1305	nop
1306
1307	/* return value has real return address */
1308	mtlr	r3
1309
1310	ld	r1, 0(r1)
1311	ld	r4,  -32(r1)
1312	ld	r3,  -24(r1)
1313	ld	r2,  -16(r1)
1314	ld	r31, -8(r1)
1315
1316	/* Jump back to real return address */
1317	blr
1318#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1319#endif /* CONFIG_FUNCTION_TRACER */
1320