/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * kexec bits:
 * Copyright (C) 2002-2003 Eric Biederman  <ebiederm@xmission.com>
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 * PPC44x port. Copyright (C) 2011,  IBM Corporation
 * 		Author: Suzuki Poulose <suzuki@in.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/kexec.h>
#include <asm/bug.h>
#include <asm/ptrace.h>

	.text

/*
 * We store the saved ksp_limit in the unused part
 * of the STACK_FRAME_OVERHEAD
 */
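/*
 * A hedged C-level sketch of the stack switch below (the helper shape
 * and local names are ours, not a kernel API; r2 holds current):
 *
 *	void call_do_softirq(void *irqstack)
 *	{
 *		unsigned long saved = current->thread.ksp_limit;
 *
 *		current->thread.ksp_limit =
 *			(unsigned long)irqstack + THREAD_INFO_GAP;
 *		switch r1 to irqstack + THREAD_SIZE - STACK_FRAME_OVERHEAD,
 *			keeping a back chain to the old r1;
 *		__do_softirq();
 *		current->thread.ksp_limit = saved;
 *		switch r1 back via the back chain;
 *	}
 */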
_GLOBAL(call_do_softirq)
	mflr	r0
	stw	r0,4(r1)
	lwz	r10,THREAD+KSP_LIMIT(r2)
	addi	r11,r3,THREAD_INFO_GAP
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3
	stw	r10,8(r1)
	stw	r11,THREAD+KSP_LIMIT(r2)
	bl	__do_softirq
	lwz	r10,8(r1)
	lwz	r1,0(r1)
	lwz	r0,4(r1)
	stw	r10,THREAD+KSP_LIMIT(r2)
	mtlr	r0
	blr

/*
 * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
 */
_GLOBAL(call_do_irq)
	mflr	r0
	stw	r0,4(r1)
	lwz	r10,THREAD+KSP_LIMIT(r2)
	addi	r11,r4,THREAD_INFO_GAP
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
	mr	r1,r4
	stw	r10,8(r1)
	stw	r11,THREAD+KSP_LIMIT(r2)
	bl	__do_irq
	lwz	r10,8(r1)
	lwz	r1,0(r1)
	lwz	r0,4(r1)
	stw	r10,THREAD+KSP_LIMIT(r2)
	mtlr	r0
	blr

/*
 * This returns the high 64 bits of the product of two 64-bit numbers.
 */
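/*
 * A hedged C sketch of the 32x32 decomposition used below (notation
 * ours, not a kernel API; A arrives in r3:r4, B in r5:r6):
 *
 *	unsigned long long mulhdu(unsigned long long a,
 *				  unsigned long long b)
 *	{
 *		unsigned int ah = a >> 32, al = a, bh = b >> 32, bl = b;
 *		unsigned long long t  = (unsigned long long)al * bl;
 *		unsigned long long m1 = (unsigned long long)al * bh + (t >> 32);
 *		unsigned long long m2 = (unsigned long long)ah * bl
 *					+ (unsigned int)m1;
 *
 *		return (unsigned long long)ah * bh + (m1 >> 32) + (m2 >> 32);
 *	}
 *
 * The assembly below additionally skips the partial products whose
 * 32-bit factor is zero.
 */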
_GLOBAL(mulhdu)
	cmpwi	r6,0
	cmpwi	cr1,r3,0
	mr	r10,r4
	mulhwu	r4,r4,r5
	beq	1f
	mulhwu	r0,r10,r6
	mullw	r7,r10,r5
	addc	r7,r0,r7
	addze	r4,r4
1:	beqlr	cr1		/* all done if high part of A is 0 */
	mr	r10,r3
	mullw	r9,r3,r5
	mulhwu	r3,r3,r5
	beq	2f
	mullw	r0,r10,r6
	mulhwu	r8,r10,r6
	addc	r7,r0,r7
	adde	r4,r4,r8
	addze	r3,r3
2:	addc	r4,r4,r9
	addze	r3,r3
	blr

/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 */
_GLOBAL(sub_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r5,r4,r5
	subf	r3,r5,r3
	mtlr	r0
	blr

/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 */
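/*
 * Roughly, as a hedged C sketch (__got2_start/__got2_end are the real
 * linker symbols; the loop shape is ours):
 *
 *	void reloc_got2(unsigned long offset)
 *	{
 *		unsigned long *p;
 *
 *		for (p = __got2_start; p < __got2_end; p++)
 *			*p += offset;
 *	}
 *
 * The bl/mflr pair below discovers where we were actually loaded, so
 * the table itself can be found before the relocation is applied.
 */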
_GLOBAL(reloc_got2)
	mflr	r11
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
	subf	r8,r7,r8
	srwi.	r8,r8,2
	beqlr
	mtctr	r8
	bl	1f
1:	mflr	r0
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r0,r4,r0
	add	r7,r0,r7
2:	lwz	r0,0(r7)
	add	r0,r0,r3
	stw	r0,0(r7)
	addi	r7,r7,4
	bdnz	2b
	mtlr	r11
	blr

/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 *   r3 = data offset
 *   r4 = ptr to CPU spec (relocated)
 */
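/*
 * In C this would be roughly the following (a sketch only; cur_cpu_spec
 * and cpu_setup are the real fields, the cast-and-add shape is ours):
 *
 *	void call_setup_cpu(unsigned long offset)
 *	{
 *		struct cpu_spec *spec;
 *
 *		spec = *(struct cpu_spec **)((char *)&cur_cpu_spec + offset);
 *		spec = (struct cpu_spec *)((char *)spec + offset);
 *		if (!spec->cpu_setup)
 *			return;
 *		((void (*)(long, struct cpu_spec *))
 *			((char *)spec->cpu_setup + offset))(offset, spec);
 *	}
 */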
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r4,0(r4)
	add	r4,r4,r3
	lwz	r5,CPU_SPEC_SETUP(r4)
	cmpwi	0,r5,0
	add	r5,r5,r3
	beqlr
	mtctr	r5
	bctr

#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)

/* This gets called by via-pmu.c to switch the PLL selection
 * on the 750fx CPU. This function should really be moved to some
 * other place (as should most of the cpufreq code in via-pmu).
 */
_GLOBAL(low_choose_750fx_pll)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* If switching to PLL1, disable HID0:BTIC */
	cmplwi	cr0,r3,0
	beq	1f
	mfspr	r5,SPRN_HID0
	rlwinm	r5,r5,0,27,25
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1	/* Read the current HID1 value */
	rlwinm	r5,r3,16,15,15	/* Build a HID1:PS bit from parameter */
	rlwinm	r4,r4,0,16,14	/* Clear out HID1:PS from value read */
	or	r4,r4,r5	/* (rlwimi could merge these two steps) */
	mtspr	SPRN_HID1,r4

	/* Store new HID1 image */
	CURRENT_THREAD_INFO(r6, r1)
	lwz	r6,TI_CPU(r6)
	slwi	r6,r6,2
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)

	/* If switching to PLL0, enable HID0:BTIC */
	cmplwi	cr0,r3,0
	bne	1f
	mfspr	r5,SPRN_HID0
	ori	r5,r5,HID0_BTIC
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Return */
	mtmsr	r7
	blr

_GLOBAL(low_choose_7447a_dfs)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */
	sync
	mtspr	SPRN_HID1,r4
	sync
	isync

	/* Return */
	mtmsr	r7
	blr

#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */

/*
 * complement mask on the msr then "or" some values on.
 *     _nmask_and_or_msr(nmask, value_to_or)
 */
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	isync
	blr			/* Done */

#ifdef CONFIG_40x

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	lbz	r3,0(r3)
	sync
	mtmsr	r7
	sync
	isync
	blr

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	stb	r3,0(r4)
	sync
	mtmsr	r7
	sync
	isync
	blr

#endif /* CONFIG_40x */


/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	isync
	lis	r5, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
	li      r3, 512
	mtctr   r3
	lis     r4, KERNELBASE@h
1:	iccci   0, r4
	addi    r4, r4, 16
	bdnz    1b
#else
	lis	r3, KERNELBASE@h
	iccci	0,r3
#endif
#elif defined(CONFIG_FSL_BOOKE)
BEGIN_FTR_SECTION
	mfspr   r3,SPRN_L1CSR0
	ori     r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
	mtspr   SPRN_L1CSR0,r3
	isync
	blr
END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
	mfspr	r3,SPRN_L1CSR1
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	mtspr	SPRN_L1CSR1,r3
#else
	mfspr	r3,SPRN_PVR
	rlwinm	r3,r3,16,16,31
	cmpwi	0,r3,1
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
	mfspr	r3,SPRN_HID0
	ori	r3,r3,HID0_ICFI
	mtspr	SPRN_HID0,r3
#endif /* CONFIG_8xx/4xx */
	isync
	blr

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 */
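/*
 * The range arithmetic used here and by the dcache routines below, as
 * a hedged C sketch (helper names are ours):
 *
 *	start &= ~(unsigned long)(L1_CACHE_BYTES - 1);
 *	n = (stop - start + L1_CACHE_BYTES - 1) >> L1_CACHE_SHIFT;
 *	for (i = 0; i < n; i++)
 *		dcbst(start + i * L1_CACHE_BYTES);	-- push to memory
 *	sync();
 *	for (i = 0; i < n; i++)
 *		icbi(start + i * L1_CACHE_BYTES);	-- invalidate icache
 *	sync();
 *	isync();
 */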
_KPROBE(flush_icache_range)
BEGIN_FTR_SECTION
	PURGE_PREFETCHED_INS
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4
	mr	r6,r3
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
#ifndef CONFIG_44x
	mtctr	r4
2:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	2b
#else
	/* Flash invalidate on 44x because we are passed kmapped addresses and
	   this doesn't work for userspace pages due to the virtually tagged
	   icache.  Sigh. */
	iccci	0, r0
#endif
	sync				/* additional sync needed on g4 */
	isync
	blr
/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	blr

/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbf	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbf's to get to ram */
	blr

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbi	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbi's to complete */
	blr

/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
	PURGE_PREFETCHED_INS
	blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3				/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
#ifdef CONFIG_44x
	/* We don't flush the icache on 44x. Those have a virtual icache
	 * and we don't have access to the virtual address here (it's
	 * not the page vaddr but where it's mapped in user space). The
	 * flushing of the icache on these is handled elsewhere, when
	 * a change in the address space occurs, before returning to
	 * user space.
	 */
BEGIN_MMU_FTR_SECTION
	blr
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
#endif /* CONFIG_44x */
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	isync
	blr

#ifndef CONFIG_BOOKE
/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address.  We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 *	void __flush_dcache_icache_phys(unsigned long physaddr)
 */
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
	PURGE_PREFETCHED_INS
	blr					/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	mfmsr	r10
	rlwinm	r0,r10,0,28,26			/* clear DR */
	mtmsr	r0
	isync
	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3				/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	mtmsr	r10				/* restore DR */
	isync
	blr
#endif /* CONFIG_BOOKE */

/*
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order);
 */
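/*
 * Equivalent C, as a hedged sketch (dcbz() stands in for the inline
 * instruction):
 *
 *	void clear_pages(void *page, int order)
 *	{
 *		unsigned long n = (PAGE_SIZE / L1_CACHE_BYTES) << order;
 *		char *p = page;
 *
 *		while (n--) {
 *			dcbz(p);		-- zero one cache line
 *			p += L1_CACHE_BYTES;
 *		}
 *	}
 */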
_GLOBAL(clear_pages)
	li	r0,PAGE_SIZE/L1_CACHE_BYTES
	slw	r0,r0,r4
	mtctr	r0
1:	dcbz	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	blr

/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
 */
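/*
 * Sketch of the scheme (our summary): for each destination cache line,
 * dcbz allocates and zeroes the line without reading it from memory,
 * dcbt prefetches the source MAX_COPY_PREFETCH lines ahead, and the
 * line is then copied 16 bytes at a time with the macro below.
 */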
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)

_GLOBAL(copy_page)
	addi	r3,r3,-4
	addi	r4,r4,-4

	li	r5,4

#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,L1_CACHE_BYTES
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
	crclr	4*cr0+eq
2:
	mtctr	r0
1:
	dcbt	r11,r4
	dcbz	r5,r3
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	1b
	beqlr
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	b	2b

/*
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr);
 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
 */
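/*
 * Hedged C equivalent using GCC atomic builtins (not the kernel's own
 * atomic API):
 *
 *	void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 *	{
 *		__atomic_fetch_and(addr, ~mask, __ATOMIC_RELAXED);
 *	}
 *
 *	void atomic_set_mask(unsigned long mask, unsigned long *addr)
 *	{
 *		__atomic_fetch_or(addr, mask, __ATOMIC_RELAXED);
 *	}
 *
 * The lwarx/stwcx. pairs below retry until the reservation taken on
 * *addr survives from the load to the store.
 */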
_GLOBAL(atomic_clear_mask)
10:	lwarx	r5,0,r4
	andc	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr
_GLOBAL(atomic_set_mask)
10:	lwarx	r5,0,r4
	or	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr

/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 has 64 bit value
 * R5    has shift count
 * result in R3/R4
 *
 *  ashrdi3: arithmetic right shift (sign propagation)
 *  lshrdi3: logical right shift
 *  ashldi3: left shift
 */
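/*
 * For reference, __ashrdi3 in C (a hedged sketch in our notation; the
 * assembly below gets the same result for counts 0-63 without branches):
 *
 *	long long __ashrdi3(long long x, int c)
 *	{
 *		int msw = x >> 32;
 *		unsigned int lsw = x;
 *
 *		if (c == 0)
 *			return x;
 *		if (c < 32)
 *			return ((unsigned long long)(unsigned int)(msw >> c) << 32)
 *				| (lsw >> c) | ((unsigned int)msw << (32 - c));
 *		return ((unsigned long long)(unsigned int)(msw >> 31) << 32)
 *			| (unsigned int)(msw >> (c - 32));
 *	}
 *
 * __lshrdi3 and __ashldi3 follow the same pattern with srw/slw.
 */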
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

_GLOBAL(__ashldi3)
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr

_GLOBAL(__lshrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

/*
 * 64-bit comparison: __cmpdi2(s64 a, s64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */
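/*
 * Equivalent C (sketch): compare the signed high words first, then the
 * unsigned low words:
 *
 *	int __cmpdi2(long long a, long long b)
 *	{
 *		int ah = a >> 32, bh = b >> 32;
 *		unsigned int al = a, bl = b;
 *
 *		if (ah != bh)
 *			return ah < bh ? 0 : 2;
 *		if (al != bl)
 *			return al < bl ? 0 : 2;
 *		return 1;
 *	}
 *
 * __ucmpdi2 is identical except that the high words compare unsigned.
 */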
_GLOBAL(__cmpdi2)
	cmpw	r3,r5
	li	r3,1
	bne	1f
	cmplw	r4,r6
	beqlr
1:	li	r3,0
	bltlr
	li	r3,2
	blr
/*
 * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */
_GLOBAL(__ucmpdi2)
	cmplw	r3,r5
	li	r3,1
	bne	1f
	cmplw	r4,r6
	beqlr
1:	li	r3,0
	bltlr
	li	r3,2
	blr

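/*
 * __bswapdi2(u64): byte-reverse a 64-bit value.  Each rotlwi/rlwimi
 * triple byte-swaps one 32-bit half, and the halves themselves are
 * exchanged (r3:r4 -> swapped r4:r3) to complete the 64-bit reversal.
 */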
_GLOBAL(__bswapdi2)
	rotlwi  r9,r4,8
	rotlwi  r10,r3,8
	rlwimi  r9,r4,24,0,7
	rlwimi  r10,r3,24,0,7
	rlwimi  r9,r4,24,16,23
	rlwimi  r10,r3,24,16,23
	mr      r3,r9
	mr      r4,r10
	blr

_GLOBAL(abs)
	srawi	r4,r3,31
	xor	r3,r3,r4
	sub	r3,r3,r4
	blr

#ifdef CONFIG_SMP
_GLOBAL(start_secondary_resume)
	/* Reset stack */
	CURRENT_THREAD_INFO(r1, r1)
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r3,0
	stw	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	start_secondary
	b	.
#endif /* CONFIG_SMP */

/*
 * This routine is just here to keep GCC happy - sigh...
 */
_GLOBAL(__main)
	blr

#ifdef CONFIG_KEXEC
	/*
	 * Must be relocatable PIC code callable as a C function.
	 */
	.globl relocate_new_kernel
relocate_new_kernel:
	/* r3 = page_list   */
	/* r4 = reboot_code_buffer */
	/* r5 = start_address      */

#ifdef CONFIG_FSL_BOOKE

	mr	r29, r3
	mr	r30, r4
	mr	r31, r5

#define ENTRY_MAPPING_KEXEC_SETUP
#include "fsl_booke_entry_mapping.S"
#undef ENTRY_MAPPING_KEXEC_SETUP

	mr      r3, r29
	mr      r4, r30
	mr      r5, r31

	li	r0, 0
#elif defined(CONFIG_44x)

	/* Save our parameters */
	mr	r29, r3
	mr	r30, r4
	mr	r31, r5

#ifdef CONFIG_PPC_47x
	/* Check for 47x cores */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,PVR_476@h
	beq	setup_map_47x
	cmplwi	cr0,r3,PVR_476_ISS@h
	beq	setup_map_47x
#endif /* CONFIG_PPC_47x */

/*
 * Code for setting up 1:1 mapping for PPC440x for KEXEC
 *
 * We cannot switch off the MMU on PPC44x.
 * So we:
 * 1) Invalidate all the mappings except the one we are running from.
 * 2) Create a tmp mapping for our code in the other address space (TS) and
 *    jump to it. Invalidate the entry we started in.
 * 3) Create a 1:1 mapping for 0-2GiB in chunks of 256M in the original TS.
 * 4) Jump to the 1:1 mapping in the original TS.
 * 5) Invalidate the tmp mapping.
 *
 * - Based on the kexec support code for FSL BookE
 */
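/*
 * A 440 TLB entry is programmed as three words (sketch of the layout
 * relied on below, per the 440 manuals): PAGEID holds EPN, V, TS and
 * SIZE; XLAT holds the RPN; ATTRIB holds the attribute and permission
 * bits such as PPC44x_TLB_SW/SR/SX/G used later.
 */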

	/*
	 * Load the PID with kernel PID (0).
	 * Also load our MSR_IS and TID to MMUCR for TLB search.
	 */
	li	r3, 0
	mtspr	SPRN_PID, r3
	mfmsr	r4
	andi.	r4,r4,MSR_IS@l
	beq	wmmucr
	oris	r3,r3,PPC44x_MMUCR_STS@h
wmmucr:
	mtspr	SPRN_MMUCR,r3
	sync

	/*
	 * Invalidate all the TLB entries except the current entry
	 * where we are running from
	 */
	bl	0f				/* Find our address */
0:	mflr	r5				/* Make it accessible */
	tlbsx	r23,0,r5			/* Find entry we are in */
	li	r4,0				/* Start at TLB entry 0 */
	li	r3,0				/* Set PAGEID inval value */
1:	cmpw	r23,r4				/* Is this our entry? */
	beq	skip				/* If so, skip the inval */
	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
skip:
	addi	r4,r4,1				/* Increment */
	cmpwi	r4,64				/* Are we done? */
	bne	1b				/* If not, repeat */
	isync

	/* Create a temp mapping and jump to it */
	andi.	r6, r23, 1		/* Find the index to use */
	addi	r24, r6, 1		/* r24 will contain 1 or 2 */

	mfmsr	r9			/* get the MSR */
	rlwinm	r5, r9, 27, 31, 31	/* Extract the MSR[IS] */
	xori	r7, r5, 1		/* Use the other address space */

	/* Read the current mapping entries */
	tlbre	r3, r23, PPC44x_TLB_PAGEID
	tlbre	r4, r23, PPC44x_TLB_XLAT
	tlbre	r5, r23, PPC44x_TLB_ATTRIB

	/* Save our current XLAT entry */
	mr	r25, r4

	/* Extract the TLB PageSize */
	li	r10, 1			/* r10 will hold PageSize */
	rlwinm	r11, r3, 0, 24, 27	/* bits 24-27 */

	/* XXX: As of now we use 256M, 4K pages */
	cmpwi	r11, PPC44x_TLB_256M
	bne	tlb_4k
	rotlwi	r10, r10, 28		/* r10 = 256M */
	b	write_out
tlb_4k:
	cmpwi	r11, PPC44x_TLB_4K
	bne	default
	rotlwi	r10, r10, 12		/* r10 = 4K */
	b	write_out
default:
	rotlwi	r10, r10, 10		/* r10 = 1K */

write_out:
	/*
	 * Write out the tmp 1:1 mapping for this code in other address space
	 * Fixup EPN = RPN, TS = other address space
	 */
	insrwi	r3, r7, 1, 23		/* Bit 23 is TS for PAGEID field */

	/* Write out the tmp mapping entries */
	tlbwe	r3, r24, PPC44x_TLB_PAGEID
	tlbwe	r4, r24, PPC44x_TLB_XLAT
	tlbwe	r5, r24, PPC44x_TLB_ATTRIB

	subi	r11, r10, 1		/* PageOffset Mask = PageSize - 1 */
	not	r10, r11		/* Mask for PageNum */

	/* Switch to other address space in MSR */
	insrwi	r9, r7, 1, 26		/* Set MSR[IS] = r7 */

	bl	1f
1:	mflr	r8
	addi	r8, r8, (2f-1b)		/* Find the target offset */

	/* Jump to the tmp mapping */
	mtspr	SPRN_SRR0, r8
	mtspr	SPRN_SRR1, r9
	rfi

2:
	/* Invalidate the entry we were executing from */
	li	r3, 0
	tlbwe	r3, r23, PPC44x_TLB_PAGEID

	/* attribute fields. rwx for SUPERVISOR mode */
	li	r5, 0
	ori	r5, r5, (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)

	/* Create 1:1 mapping in 256M pages */
	xori	r7, r7, 1			/* Revert back to Original TS */

	li	r8, 0				/* PageNumber */
	li	r6, 3				/* TLB Index, start at 3 */

next_tlb:
	rotlwi	r3, r8, 28			/* Create EPN (bits 0-3) */
	mr	r4, r3				/* RPN = EPN */
	ori	r3, r3, (PPC44x_TLB_VALID | PPC44x_TLB_256M) /* SIZE = 256M, Valid */
	insrwi	r3, r7, 1, 23			/* Set TS from r7 */

	tlbwe	r3, r6, PPC44x_TLB_PAGEID	/* PageID field : EPN, V, SIZE */
	tlbwe	r4, r6, PPC44x_TLB_XLAT		/* Address translation : RPN   */
	tlbwe	r5, r6, PPC44x_TLB_ATTRIB	/* Attributes */

	addi	r8, r8, 1			/* Increment PN */
	addi	r6, r6, 1			/* Increment TLB Index */
	cmpwi	r8, 8				/* Are we done ? */
	bne	next_tlb
	isync

	/* Jump to the new mapping 1:1 */
	li	r9,0
	insrwi	r9, r7, 1, 26			/* Set MSR[IS] = r7 */

	bl	1f
1:	mflr	r8
	and	r8, r8, r11			/* Get our offset within page */
	addi	r8, r8, (2f-1b)

	and	r5, r25, r10			/* Get our target PageNum */
	or	r8, r8, r5			/* Target jump address */

	mtspr	SPRN_SRR0, r8
	mtspr	SPRN_SRR1, r9
	rfi
2:
	/* Invalidate the tmp entry we used */
	li	r3, 0
	tlbwe	r3, r24, PPC44x_TLB_PAGEID
	sync
	b	ppc44x_map_done

#ifdef CONFIG_PPC_47x

	/* 1:1 mapping for 47x */

setup_map_47x:

	/*
	 * Load the kernel pid (0) to PID and also to MMUCR[TID].
	 * Also set the MSR IS->MMUCR STS
	 */
	li	r3, 0
	mtspr	SPRN_PID, r3			/* Set PID */
	mfmsr	r4				/* Get MSR */
	andi.	r4, r4, MSR_IS@l		/* TS=1? */
	beq	1f				/* If not, leave STS=0 */
	oris	r3, r3, PPC47x_MMUCR_STS@h	/* Set STS=1 */
1:	mtspr	SPRN_MMUCR, r3			/* Put MMUCR */
	sync

	/* Find the entry we are running from */
	bl	2f
2:	mflr	r23
	tlbsx	r23, 0, r23
	tlbre	r24, r23, 0			/* TLB Word 0 */
	tlbre	r25, r23, 1			/* TLB Word 1 */
	tlbre	r26, r23, 2			/* TLB Word 2 */


	/*
	 * Invalidate all the TLB entries by writing to 256 EPNs (r4)
	 * of 4k page size in all 4 ways (0-3 in r3).
	 * This would invalidate the entire UTLB including the one we are
	 * running from. However the shadow TLB entries would help us
	 * to continue the execution, until we flush them (rfi/isync).
	 */
	addis	r3, 0, 0x8000			/* specify the way */
	addi	r4, 0, 0			/* TLB Word0 = (EPN=0, VALID = 0) */
	addi	r5, 0, 0
	b	clear_utlb_entry

	/* Align the loop to speed things up (from head_44x.S) */
	.align	6

clear_utlb_entry:

	tlbwe	r4, r3, 0
	tlbwe	r5, r3, 1
	tlbwe	r5, r3, 2
	addis	r3, r3, 0x2000			/* Increment the way */
	cmpwi	r3, 0
	bne	clear_utlb_entry
	addis	r3, 0, 0x8000
	addis	r4, r4, 0x100			/* Increment the EPN */
	cmpwi	r4, 0
	bne	clear_utlb_entry

	/* Create the entries in the other address space */
	mfmsr	r5
	rlwinm	r7, r5, 27, 31, 31		/* Get the TS (Bit 26) from MSR */
	xori	r7, r7, 1			/* r7 = !TS */

	insrwi	r24, r7, 1, 21			/* Change the TS in the saved TLB word 0 */

	/*
	 * Write out the TLB entries for the tmp mapping
	 * Use way '0' so that we could easily invalidate it later.
	 */
	lis	r3, 0x8000			/* Way '0' */

	tlbwe	r24, r3, 0
	tlbwe	r25, r3, 1
	tlbwe	r26, r3, 2

	/* Update the msr to the new TS */
	insrwi	r5, r7, 1, 26

	bl	1f
1:	mflr	r6
	addi	r6, r6, (2f-1b)

	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r5
	rfi

	/*
	 * Now we are in the tmp address space.
	 * Create a 1:1 mapping for 0-2GiB in the original TS.
	 */
2:
	li	r3, 0
	li	r4, 0				/* TLB Word 0 */
	li	r5, 0				/* TLB Word 1 */
	li	r6, 0
	ori	r6, r6, PPC47x_TLB2_S_RWX	/* TLB word 2 */

	li	r8, 0				/* PageIndex */

	xori	r7, r7, 1			/* revert back to original TS */

write_utlb:
	rotlwi	r5, r8, 28			/* RPN = PageIndex * 256M */
						/* ERPN = 0 as we don't use memory above 2G */

	mr	r4, r5				/* EPN = RPN */
	ori	r4, r4, (PPC47x_TLB0_VALID | PPC47x_TLB0_256M)
	insrwi	r4, r7, 1, 21			/* Insert the TS to Word 0 */

	tlbwe	r4, r3, 0			/* Write out the entries */
	tlbwe	r5, r3, 1
	tlbwe	r6, r3, 2
	addi	r8, r8, 1
	cmpwi	r8, 8				/* Have we completed ? */
	bne	write_utlb
	/* Make sure the TLB writes have completed */
	isync

	/*
	 * Prepare to jump to the 1:1 mapping.
	 * 1) Extract page size of the tmp mapping
	 *    DSIZ = TLB_Word0[22:27]
	 * 2) Calculate the physical address of the address
	 *    to jump to.
	 */
	rlwinm	r10, r24, 0, 22, 27

	cmpwi	r10, PPC47x_TLB0_4K
	bne	0f
	li	r10, 0x1000			/* r10 = 4k */
	bl	1f

0:
	/* Defaults to 256M */
	lis	r10, 0x1000

	bl	1f
1:	mflr	r4
	addi	r4, r4, (2f-1b)			/* virtual address of 2f */

	subi	r11, r10, 1			/* offsetmask = Pagesize - 1 */
	not	r10, r11			/* Pagemask = ~(offsetmask) */

	and	r5, r25, r10			/* Physical page */
	and	r6, r4, r11			/* offset within the current page */

	or	r5, r5, r6			/* Physical address for 2f */

	/* Switch the TS in MSR to the original one */
	mfmsr	r8
	insrwi	r8, r7, 1, 26

	mtspr	SPRN_SRR1, r8
	mtspr	SPRN_SRR0, r5
	rfi

2:
	/* Invalidate the tmp mapping */
	lis	r3, 0x8000			/* Way '0' */

	clrrwi	r24, r24, 12			/* Clear the valid bit */
	tlbwe	r24, r3, 0
	tlbwe	r25, r3, 1
	tlbwe	r26, r3, 2

	/* Make sure we complete the TLB write and flush the shadow TLB */
	isync

#endif

ppc44x_map_done:


	/* Restore the parameters */
	mr	r3, r29
	mr	r4, r30
	mr	r5, r31

	li	r0, 0
#else
	li	r0, 0

	/*
	 * Set Machine Status Register to a known status,
	 * switch the MMU off and jump to 1: in a single step.
	 */

	mr	r8, r0
	ori     r8, r8, MSR_RI|MSR_ME
	mtspr	SPRN_SRR1, r8
	addi	r8, r4, 1f - relocate_new_kernel
	mtspr	SPRN_SRR0, r8
	sync
	rfi

1:
#endif
	/* from this point address translation is turned off */
	/* and interrupts are disabled */

	/* set a new stack at the bottom of our page... */
	/* (not really needed now) */
	addi	r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */
	stw	r0, 0(r1)

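	/*
	 * The loop below walks the kexec indirection list.  Hedged C
	 * sketch (the IND_* values match the bit tests done below; the
	 * loop shape is ours):
	 *
	 *	for (entry = page_list; !(entry & IND_DONE); entry = *ptr++) {
	 *		addr = entry & PAGE_MASK;
	 *		if (entry & IND_DESTINATION)		-- 1 << 0
	 *			dest = addr;
	 *		else if (entry & IND_INDIRECTION)	-- 1 << 1
	 *			ptr = (unsigned long *)addr;
	 *		else if (entry & IND_SOURCE) {		-- 1 << 3
	 *			copy PAGE_SIZE bytes from addr to dest;
	 *			dest += PAGE_SIZE;
	 *		}
	 *	}
	 *
	 * r6 accumulates an XOR checksum of every word copied, and each
	 * destination word is pushed out with dcbst/icbi so the new
	 * kernel sees coherent instructions.
	 */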
	/* Do the copies */
	li	r6, 0 /* checksum */
	mr	r0, r3
	b	1f

0:	/* top, read another word for the indirection page */
	lwzu	r0, 4(r3)

1:
	/* is it a destination page? (r8) */
	rlwinm.	r7, r0, 0, 31, 31 /* IND_DESTINATION (1<<0) */
	beq	2f

	rlwinm	r8, r0, 0, 0, 19 /* clear kexec flags, page align */
	b	0b

2:	/* is it an indirection page? (r3) */
	rlwinm.	r7, r0, 0, 30, 30 /* IND_INDIRECTION (1<<1) */
	beq	2f

	rlwinm	r3, r0, 0, 0, 19 /* clear kexec flags, page align */
	subi	r3, r3, 4
	b	0b

2:	/* are we done? */
	rlwinm.	r7, r0, 0, 29, 29 /* IND_DONE (1<<2) */
	beq	2f
	b	3f

2:	/* is it a source page? (r9) */
	rlwinm.	r7, r0, 0, 28, 28 /* IND_SOURCE (1<<3) */
	beq	0b

	rlwinm	r9, r0, 0, 0, 19 /* clear kexec flags, page align */

	li	r7, PAGE_SIZE / 4
	mtctr   r7
	subi    r9, r9, 4
	subi    r8, r8, 4
9:
	lwzu    r0, 4(r9)  /* do the copy */
	xor	r6, r6, r0
	stwu    r0, 4(r8)
	dcbst	0, r8
	sync
	icbi	0, r8
	bdnz    9b

	addi    r9, r9, 4
	addi    r8, r8, 4
	b	0b

3:

	/* To be certain of avoiding problems with self-modifying code
	 * execute a serializing instruction here.
	 */
	isync
	sync

	mfspr	r3, SPRN_PIR /* current core we are running on */
	mr	r4, r5 /* load physical address of chunk called */

	/* jump to the entry point, usually the setup routine */
	mtlr	r5
	blrl

1:	b	1b

relocate_new_kernel_end:

	.globl relocate_new_kernel_size
relocate_new_kernel_size:
	.long relocate_new_kernel_end - relocate_new_kernel
#endif
