1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
3 *
4 *   This program is free software; you can redistribute it and/or
5 *   modify it under the terms of the GNU General Public License
6 *   as published by the Free Software Foundation, version 2.
7 *
8 *   This program is distributed in the hope that it will be useful, but
9 *   WITHOUT ANY WARRANTY; without even the implied warranty of
10 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 *   NON INFRINGEMENT.  See the GNU General Public License for
12 *   more details.
13 */
14
15#include <linux/linkage.h>
16#include <linux/unistd.h>
17#include <asm/irqflags.h>
18#include <asm/processor.h>
19#include <arch/abi.h>
20#include <arch/spr_def.h>
21
/*
 * TILE-Gx renamed the "branch if non-zero, predicted taken" opcode
 * from "bnzt" (tilepro spelling) to "bnezt".  Map the old mnemonic
 * so code shared between the two ISAs assembles on both.
 * NOTE(review): no bnzt appears in the code visible here — presumably
 * another part of this file uses it; confirm before removing.
 */
#ifdef __tilegx__
#define bnzt bnezt
#endif
25
/*
 * unsigned long current_text_addr(void)
 * Return the caller's PC: copy the return address (lr) into the
 * return-value register r0, bundled with the return itself.
 */
STD_ENTRY(current_text_addr)
	{ move r0, lr; jrp lr }
	STD_ENDPROC(current_text_addr)
29
/*
 * Capture the caller's context and tail-jump to the C implementation.
 * Args passed to _dump_stack:
 *   r1 = PC of dump_stack itself (lnk gives the next bundle's PC,
 *        then addli rewinds it to the function entry point)
 *   r2 = caller's return address (lr)
 *   r3 = current stack pointer
 *   r4 = r52 (frame pointer per the tile ABI — TODO confirm in arch/abi.h)
 * The jrp after the tail-jump is never executed; it exists only so the
 * backtracer sees a well-formed function ending.
 */
STD_ENTRY(dump_stack)
	{ move r2, lr; lnk r1 }
	{ move r4, r52; addli r1, r1, dump_stack - . }
	{ move r3, sp; j _dump_stack }
	jrp lr   /* keep backtracer happy */
	STD_ENDPROC(dump_stack)
36
/*
 * Same trampoline pattern as dump_stack above: gather the live
 * register state of the current context and tail-jump to the C helper.
 *   r1 = PC of this function's entry (lnk + addli rewind)
 *   r2 = caller's return address (lr)
 *   r3 = current stack pointer
 *   r4 = r52 (frame pointer per the tile ABI — TODO confirm in arch/abi.h)
 * The trailing jrp is unreachable; it keeps the backtracer happy.
 */
STD_ENTRY(KBacktraceIterator_init_current)
	{ move r2, lr; lnk r1 }
	{ move r4, r52; addli r1, r1, KBacktraceIterator_init_current - . }
	{ move r3, sp; j _KBacktraceIterator_init_current }
	jrp lr   /* keep backtracer happy */
	STD_ENDPROC(KBacktraceIterator_init_current)
43
44/* Loop forever on a nap during SMP boot. */
/*
 * Loop forever on a nap during SMP boot.  A secondary CPU parks here
 * until it is woken by other means; since nap is not architecturally
 * guaranteed to stay asleep, any spurious wakeup just re-enters nap.
 */
STD_ENTRY(smp_nap)
	nap
	nop       /* avoid provoking the icache prefetch with a jump */
	j smp_nap /* we are not architecturally guaranteed not to exit nap */
	jrp lr    /* clue in the backtracer */
	STD_ENDPROC(smp_nap)
51
52/*
53 * Enable interrupts racelessly and then nap until interrupted.
54 * Architecturally, we are guaranteed that enabling interrupts via
55 * mtspr to INTERRUPT_CRITICAL_SECTION only interrupts at the next PC.
56 * This function's _cpu_idle_nap address is special; see intvec.S.
57 * When interrupted at _cpu_idle_nap, we bump the PC forward 8, and
58 * as a result return to the function that called _cpu_idle().
59 */
STD_ENTRY(_cpu_idle)
	movei r1, 1                    /* value to write into the ICS spr below */
	IRQ_ENABLE_LOAD(r2, r3)        /* stage the irq-enable values (no effect yet) */
	mtspr INTERRUPT_CRITICAL_SECTION, r1   /* ICS=1: hold off interrupts */
	IRQ_ENABLE_APPLY(r2, r3)       /* unmask, but still with ICS set */
	/*
	 * ICS=0: architecturally, an interrupt can now be taken only at
	 * the very next PC — i.e. exactly at the nap — closing the race
	 * between "enable interrupts" and "go to sleep".
	 */
	mtspr INTERRUPT_CRITICAL_SECTION, zero
	.global _cpu_idle_nap
_cpu_idle_nap:
	nap
	nop       /* avoid provoking the icache prefetch with a jump */
	/* intvec.S bumps an interrupted PC at _cpu_idle_nap forward 8,
	   landing on this jrp, which returns to _cpu_idle's caller. */
	jrp lr
	STD_ENDPROC(_cpu_idle)
72