This source file includes the following definitions:
- set_er
- get_er
1
2
3
4
5
6
7
8
9
10 #ifndef _XTENSA_PROCESSOR_H
11 #define _XTENSA_PROCESSOR_H
12
13 #include <asm/core.h>
14
15 #include <linux/compiler.h>
16 #include <linux/stringify.h>
17 #include <asm/ptrace.h>
18 #include <asm/types.h>
19 #include <asm/regs.h>
20
21
22
/*
 * The kernel's entry/exit and context-switch code relies on register
 * windows, so a CALL0-only (non-windowed) core configuration cannot run
 * this port.
 */
#if (XCHAL_HAVE_WINDOWED != 1)
# error Linux requires the Xtensa Windowed Registers Option.
#endif

/*
 * Stack alignment follows the widest load/store the core supports
 * (XCHAL_DATA_WIDTH), but never less than 16 bytes.
 */
#define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16)

/* Slab objects must satisfy the same alignment. */
#define ARCH_SLAB_MINALIGN STACK_ALIGN
32
33
34
35
36
37
38
39
40
/*
 * User address-space size: with an MMU user space occupies the low 1 GiB;
 * without an MMU the whole 4 GiB range is usable.
 */
#ifdef CONFIG_MMU
#define TASK_SIZE __XTENSA_UL_CONST(0x40000000)
#else
#define TASK_SIZE __XTENSA_UL_CONST(0xffffffff)
#endif

/* User stacks grow down from the top of the task address space. */
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP
49
50
51
52
53
54
55
/*
 * General exception cause assigned to fake NMI.  Fake NMI needs to be
 * handled differently from ordinary interrupts, so it is reported with
 * an otherwise unused EXCCAUSE value.
 */
#define EXCCAUSE_MAPPED_NMI 62

/*
 * General exception cause assigned to debug exceptions.  Debug exceptions
 * arrive through their own vector and report their cause via DEBUGCAUSE
 * rather than EXCCAUSE; this otherwise unused EXCCAUSE value is used when
 * they are redirected into the general exception path.
 */
#define EXCCAUSE_MAPPED_DEBUG 63

/*
 * DEPC also serves as a flag distinguishing double exceptions from
 * regular ones; architectural EXCCAUSE values lie below 64, so 64 safely
 * marks a valid double-exception address.
 * NOTE(review): confirm against the vector entry code that consumes DEPC.
 */
#define VALID_DOUBLE_EXCEPTION_ADDRESS 64

/* Two-level macros so the argument is macro-expanded before token pasting. */
#define XTENSA_INT_LEVEL(intno) _XTENSA_INT_LEVEL(intno)
#define _XTENSA_INT_LEVEL(intno) XCHAL_INT##intno##_LEVEL

#define XTENSA_INTLEVEL_MASK(level) _XTENSA_INTLEVEL_MASK(level)
#define _XTENSA_INTLEVEL_MASK(level) (XCHAL_INTLEVEL##level##_MASK)

#define XTENSA_INTLEVEL_ANDBELOW_MASK(l) _XTENSA_INTLEVEL_ANDBELOW_MASK(l)
#define _XTENSA_INTLEVEL_ANDBELOW_MASK(l) (XCHAL_INTLEVEL##l##_ANDBELOW_MASK)

/* Interrupt level of the profiling interrupt (used as a "fake NMI"). */
#define PROFILING_INTLEVEL XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT)

/*
 * LOCKLEVEL is the interrupt level masked by local_irq_disable().  With
 * fake NMI enabled it sits just below the profiling interrupt's level,
 * so profiling keeps firing while interrupts are "disabled".
 */
#if defined(CONFIG_XTENSA_FAKE_NMI) && defined(XCHAL_PROFILING_INTERRUPT)
#define LOCKLEVEL (PROFILING_INTLEVEL - 1)
#else
#define LOCKLEVEL XCHAL_EXCM_LEVEL
#endif

#define TOPLEVEL XCHAL_EXCM_LEVEL
/* Fake NMI is in effect whenever some level lies above LOCKLEVEL. */
#define XTENSA_FAKE_NMI (LOCKLEVEL < TOPLEVEL)
101
102
103
104
#define WSBITS (XCHAL_NUM_AREGS / 4)      /* width of WINDOWSTART in bits */
#define WBBITS (XCHAL_NUM_AREGS_LOG2 - 2) /* width of WINDOWBASE in bits */

#ifndef __ASSEMBLY__

/*
 * Build a valid return address for the given call window size.
 * ws must be 1 (call4), 2 (call8) or 3 (call12); it lands in bits 31:30.
 */
#define MAKE_RA_FOR_CALL(ra,ws) (((ra) & 0x3fffffff) | (ws) << 30)

/*
 * Convert a return address to a valid pc: the top two bits are taken
 * from the stack pointer, which lies in the same 1 GiB range as the code.
 */
#define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))

/*
 * Spill slot of register a'reg' in the spill area under stack pointer sp.
 * reg must be in [0..3].
 */
#define SPILL_SLOT(sp, reg) (*(((unsigned long *)(sp)) - 4 + (reg)))

/*
 * Spill slot of register a'reg' of a call8-created frame.
 * reg must be in [4..7].
 */
#define SPILL_SLOT_CALL8(sp, reg) (*(((unsigned long *)(sp)) - 12 + (reg)))

/*
 * Spill slot of register a'reg' of a call12-created frame.
 * reg must be in [4..11].
 */
#define SPILL_SLOT_CALL12(sp, reg) (*(((unsigned long *)(sp)) - 16 + (reg)))
134
/* Address-space limit descriptor used by the uaccess machinery. */
typedef struct {
	unsigned long seg;
} mm_segment_t;
138
/* Per-thread architecture state saved across context switches. */
struct thread_struct {

	/* kernel's return address and stack pointer for context switching */
	unsigned long ra;	/* kernel's a0: return address */
	unsigned long sp;	/* kernel's a1: stack pointer */

	mm_segment_t current_ds;	/* current address-space limit */

	/* state of the most recent fault taken by this thread */
	unsigned long bad_vaddr;	/* last user fault */
	unsigned long bad_uaddr;	/* last kernel fault accessing user space */
	unsigned long error_code;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* perf events backing ptrace hardware break-/watchpoints */
	struct perf_event *ptrace_bp[XCHAL_NUM_IBREAK];
	struct perf_event *ptrace_wp[XCHAL_NUM_DBREAK];
#endif

	/* Make structure 16 bytes aligned. */
	int align[0] __attribute__ ((aligned(16)));
};
159
/* mmap area starts halfway through the user address space. */
#define TASK_UNMAPPED_BASE (TASK_SIZE / 2)
164
/*
 * Initial thread_struct for the init task: no return address, stack
 * pointer at the top of init_stack, everything else zeroed.
 *
 * Uses C99 designated initializers; the previous "name: value" form is an
 * obsolete GCC extension.
 */
#define INIT_THREAD \
{ \
	.ra = 0, \
	.sp = sizeof(init_stack) + (long) &init_stack, \
	.current_ds = {0}, \
	\
	.bad_vaddr = 0, \
	.bad_uaddr = 0, \
	.error_code = 0, \
}
175
176
177
178
179
180
181
182
/*
 * PS value with which user threads start:
 *   RING    = USER_RING (user privilege)
 *   UM      = 1 (user mode)
 *   EXCM    = 1 (cleared by the final 'rfe' on the way out to user space)
 * and, unless the CALL0 user ABI is in use, additionally:
 *   WOE     = 1 (window overflow/underflow exceptions enabled)
 *   CALLINC = 1 (enter the thread as if via call4)
 */
#if IS_ENABLED(CONFIG_USER_ABI_CALL0)
#define USER_PS_VALUE ((USER_RING << PS_RING_SHIFT) | \
		       (1 << PS_UM_BIT) | \
		       (1 << PS_EXCM_BIT))
#else
#define USER_PS_VALUE (PS_WOE_MASK | \
		       (1 << PS_CALLINC_SHIFT) | \
		       (USER_RING << PS_RING_SHIFT) | \
		       (1 << PS_UM_BIT) | \
		       (1 << PS_EXCM_BIT))
#endif
194
195
/*
 * Set up pt_regs for a freshly exec'ed user thread: clear everything,
 * start at new_pc with stack new_sp under the user-mode PS, and leave
 * exactly one live register window at WINDOWBASE 0.
 */
#define start_thread(regs, new_pc, new_sp) \
	do { \
		memset((regs), 0, sizeof(*(regs))); \
		(regs)->pc = (new_pc); \
		(regs)->ps = USER_PS_VALUE; \
		(regs)->areg[1] = (new_sp); \
		(regs)->areg[0] = 0; /* a0 = 0 terminates the user backtrace */ \
		(regs)->wmask = 1; \
		(regs)->depc = 0; \
		(regs)->windowbase = 0; \
		(regs)->windowstart = 1; \
		(regs)->syscall = NO_SYSCALL; \
	} while (0)
209
210
/* Forward declarations; full definitions are not needed in this header. */
struct task_struct;
struct mm_struct;

/* Free all thread resources; nothing to do on xtensa. */
#define release_thread(thread) do { } while(0)

extern unsigned long get_wchan(struct task_struct *p);

/* Saved user pc / stack pointer (a1) of a stopped task. */
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->areg[1])

/* No architectural pause hint; a compiler barrier is sufficient. */
#define cpu_relax() barrier()
223
224
225
/* Write value x to special register 'sr' (given by name, e.g. icountlevel). */
#define xtensa_set_sr(x, sr) \
	({ \
	unsigned int v = (unsigned int)(x); \
	__asm__ __volatile__ ("wsr %0, "__stringify(sr) :: "a"(v)); \
	})

/* Read special register 'sr' (given by name); evaluates to its value. */
#define xtensa_get_sr(sr) \
	({ \
	unsigned int v; \
	__asm__ __volatile__ ("rsr %0, "__stringify(sr) : "=a"(v)); \
	v; \
	})
238
239 #ifndef XCHAL_HAVE_EXTERN_REGS
240 #define XCHAL_HAVE_EXTERN_REGS 0
241 #endif
242
243 #if XCHAL_HAVE_EXTERN_REGS
244
/*
 * Write 'value' to the external register selected by 'addr' via the
 * WER instruction.  The "memory" clobber keeps the access ordered with
 * respect to surrounding memory operations.
 */
static inline void set_er(unsigned long value, unsigned long addr)
{
	unsigned long v = value;
	unsigned long a = addr;

	__asm__ __volatile__ ("wer %0, %1" : : "a" (v), "a" (a) : "memory");
}
249
/*
 * Read the external register selected by 'addr' via the RER instruction.
 * The "memory" clobber keeps the access ordered with respect to
 * surrounding memory operations.
 */
static inline unsigned long get_er(unsigned long addr)
{
	unsigned long result;

	__asm__ __volatile__ ("rer %0, %1" : "=a" (result) : "a" (addr) : "memory");
	return result;
}
256
257 #endif
258
259 #endif
260 #endif