This source file includes the following definitions:
- lookup_prev_stack_frame
- unwind_nested
- sh64_unwind_inner
- sh64_unwinder_dump
- sh64_unwinder_init
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/unwinder.h>
#include <asm/stacktrace.h>

static u8 regcache[63];

/*
 * Scan the prologue of the function containing 'pc' to work out where the
 * caller's frame pointer (r14) and return address (r18) were saved, and use
 * those to recover the previous frame.
 */
static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
				   unsigned long *pprev_fp, unsigned long *pprev_pc,
				   struct pt_regs *regs)
{
	const char *sym;
	char namebuf[128];
	unsigned long offset;
	unsigned long prologue = 0;
	unsigned long fp_displacement = 0;
	unsigned long fp_prev = 0;
	unsigned long offset_r14 = 0, offset_r18 = 0;
	int i, found_prologue_end = 0;

	sym = kallsyms_lookup(pc, NULL, &offset, NULL, namebuf);
	if (!sym)
		return -EINVAL;

	prologue = pc - offset;
	if (!prologue)
		return -EINVAL;

	/*
	 * Sanity check the frame pointer: it must be 8-byte aligned and lie
	 * within the first 128MB of kernel memory, or we risk dereferencing
	 * a bad pointer below.
	 */
	if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) ||
	    (fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) ||
	    ((fp & 7) != 0)) {
		return -EINVAL;
	}

	/*
	 * Walk forward through the function prologue, one opcode at a time
	 * (the 100-instruction bound is arbitrary).
	 */
	for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) {
		unsigned long op;
		u8 major, minor;
		u8 src, dest, disp;

		op = *(unsigned long *)prologue;

		major = (op >> 26) & 0x3f;
		src   = (op >> 20) & 0x3f;
		minor = (op >> 16) & 0xf;
		disp  = (op >> 10) & 0x3f;
		dest  = (op >>  4) & 0x3f;

		/*
		 * The prologue allocates the frame either with an immediate
		 * add on r15 (small frames) or with a movi/sub pair (large
		 * frames), saves r14 and r18 with stores relative to r15,
		 * and finishes by copying r15 into r14.  Decode those cases
		 * to find the frame size and the r14/r18 save slots.
		 */
		switch (major) {
		case (0x00 >> 2):
			switch (minor) {
			case 0x8:
			case 0x9:
				/*
				 * add/add.l: "r14 = r15 + r63" (r63 is the
				 * zero register) marks the end of the prologue.
				 */
				if (src == 15 && disp == 63 && dest == 14)
					found_prologue_end = 1;

				break;
			case 0xa:
			case 0xb:
				/* sub on r15: frame size was cached by an earlier movi */
				if (src != 15 || dest != 15)
					continue;

				fp_displacement -= regcache[disp];
				fp_prev = fp - fp_displacement;
				break;
			}
			break;
		case (0xa8 >> 2):
			/* Store relative to r15: note where r14/r18 get saved */
			if (src != 15)
				continue;

			switch (dest) {
			case 14:
				if (offset_r14 || fp_displacement == 0)
					continue;

				offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
				offset_r14 *= sizeof(unsigned long);
				offset_r14 += fp_displacement;
				break;
			case 18:
				if (offset_r18 || fp_displacement == 0)
					continue;

				offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
				offset_r18 *= sizeof(unsigned long);
				offset_r18 += fp_displacement;
				break;
			}

			break;
		case (0xcc >> 2):
			/* movi: cache the immediate for later frame-size subs */
			if (dest >= 63) {
				printk(KERN_NOTICE "%s: Invalid dest reg %d "
				       "specified in movi handler. Failed "
				       "opcode was 0x%lx: ", __func__,
				       dest, op);

				continue;
			}

			/* Sign-extended immediate */
			regcache[dest] =
				sign_extend64((((u64)op >> 10) & 0xffff), 9);
			break;
		case (0xd0 >> 2):
		case (0xd4 >> 2):
			/* Immediate add on r15: small frame allocation */
			if (src != 15 || dest != 15)
				continue;

			/* Sign-extended frame size */
			fp_displacement +=
				(u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
			fp_prev = fp - fp_displacement;
			break;
		}

		if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev)
			break;
	}

	if (offset_r14 == 0 || fp_prev == 0) {
		if (!offset_r14)
			pr_debug("Unable to find r14 offset\n");
		if (!fp_prev)
			pr_debug("Unable to find previous fp\n");

		return -EINVAL;
	}

	/* If the caller gave us no pc, we must at least have found a saved r18 */
	if (!*pprev_pc && (offset_r18 == 0))
		return -EINVAL;

	*pprev_fp = *(unsigned long *)(fp_prev + offset_r14);

	if (offset_r18)
		*pprev_pc = *(unsigned long *)(fp_prev + offset_r18);

	*pprev_pc &= ~1;

	return 0;
}

/*
 * Register snapshot used when sh64_unwinder_dump() is called without a
 * pt_regs to start from.
 */
static struct pt_regs here_regs;

extern const char syscall_ret;
extern const char ret_from_syscall;
extern const char ret_from_exception;
extern const char ret_from_irq;

static void sh64_unwind_inner(const struct stacktrace_ops *ops,
			      void *data, struct pt_regs *regs);

/*
 * If the frame pointer looks like a valid, 8-byte aligned pointer into
 * kernel memory, treat it as a saved pt_regs and unwind the nested context.
 */
static inline void unwind_nested(const struct stacktrace_ops *ops, void *data,
				 unsigned long pc, unsigned long fp)
{
	if ((fp >= __MEMORY_START) &&
	    ((fp & 7) == 0))
		sh64_unwind_inner(ops, data, (struct pt_regs *)fp);
}

static void sh64_unwind_inner(const struct stacktrace_ops *ops,
			      void *data, struct pt_regs *regs)
{
	unsigned long pc, fp;
	int ofs = 0;
	int first_pass;

	pc = regs->pc & ~1;
	fp = regs->regs[14];

	first_pass = 1;
	for (;;) {
		int cond;
		unsigned long next_fp, next_pc;

		if (pc == ((unsigned long)&syscall_ret & ~1)) {
			printk("SYSCALL\n");
			unwind_nested(ops, data, pc, fp);
			return;
		}

		if (pc == ((unsigned long)&ret_from_syscall & ~1)) {
			printk("SYSCALL (PREEMPTED)\n");
			unwind_nested(ops, data, pc, fp);
			return;
		}

		if (pc == ((unsigned long)&ret_from_exception & ~1)) {
			printk("EXCEPTION\n");
			unwind_nested(ops, data, pc, fp);
			return;
		}

		if (pc == ((unsigned long)&ret_from_irq & ~1)) {
			printk("IRQ\n");
			unwind_nested(ops, data, pc, fp);
			return;
		}

		cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) &&
			((pc & 3) == 0) && ((fp & 7) == 0));

		pc -= ofs;

		ops->address(data, pc, 1);

		if (first_pass) {
			/*
			 * The innermost frame may not have saved r18 (the
			 * return address) to the stack yet, so seed the
			 * previous pc from the value recorded in pt_regs.
			 */
			next_pc = regs->regs[18];
		} else {
			next_pc = 0;
		}

		if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) {
			ofs = sizeof(unsigned long);
			pc = next_pc & ~1;
			fp = next_fp;
		} else {
			printk("Unable to lookup previous stack frame\n");
			break;
		}
		first_pass = 0;
	}

	printk("\n");
}

static void sh64_unwinder_dump(struct task_struct *task,
			       struct pt_regs *regs,
			       unsigned long *sp,
			       const struct stacktrace_ops *ops,
			       void *data)
{
	if (!regs) {
		/*
		 * No register state was supplied, so snapshot the current
		 * frame pointer, stack pointer, link register, target
		 * registers and pc so we can unwind from here.
		 */
		regs = &here_regs;

		__asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14]));
		__asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15]));
		__asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18]));

		__asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0]));
		__asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1]));
		__asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2]));
		__asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3]));
		__asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4]));
		__asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5]));
		__asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6]));
		__asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7]));

		__asm__ __volatile__ (
			"pta 0f, tr0\n\t"
			"blink tr0, %0\n\t"
			"0: nop"
			: "=r" (regs->pc)
		);
	}

	sh64_unwind_inner(ops, data, regs);
}

static struct unwinder sh64_unwinder = {
	.name = "sh64-unwinder",
	.dump = sh64_unwinder_dump,
	.rating = 150,
};

static int __init sh64_unwinder_init(void)
{
	return unwinder_register(&sh64_unwinder);
}
early_initcall(sh64_unwinder_init);
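
For reference, the registered unwinder is consumed through the stacktrace_ops callback interface seen above (sh64_unwind_inner reports each frame via ops->address). Below is a minimal sketch of a caller, assuming the generic arch/sh unwind_stack() entry point that dispatches to the highest-rated registered unwinder and a void three-argument address callback matching the call made in this file; print_entry and dump_current_stack are hypothetical names used only for illustration.

#include <linux/kernel.h>
#include <asm/ptrace.h>
#include <asm/unwinder.h>
#include <asm/stacktrace.h>

/* Hypothetical callback: print each return address the unwinder recovers. */
static void print_entry(void *data, unsigned long address, int reliable)
{
	printk("  [<%08lx>] %s%pS\n", address,
	       reliable ? "" : "? ", (void *)address);
}

static const struct stacktrace_ops print_ops = {
	.address = print_entry,
};

/* Hypothetical helper: walk the given register state through the unwinder. */
static void dump_current_stack(struct pt_regs *regs)
{
	/* Dispatches to sh64_unwinder_dump() once it has been registered. */
	unwind_stack(NULL, regs, NULL, &print_ops, NULL);
}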