This source file includes the following definitions.
- flush_thread
- thread_saved_pc
- copy_thread_tls
- dump_fpu
- dump_task_regs
- get_wchan
- arch_cpu_idle
1
2
3
4 #include <linux/module.h>
5 #include <linux/version.h>
6 #include <linux/sched.h>
7 #include <linux/sched/task_stack.h>
8 #include <linux/sched/debug.h>
9 #include <linux/delay.h>
10 #include <linux/kallsyms.h>
11 #include <linux/uaccess.h>
12 #include <linux/ptrace.h>
13
14 #include <asm/elf.h>
15 #include <abi/reg_ops.h>
16
17 struct cpuinfo_csky cpu_data[NR_CPUS];
18
19 asmlinkage void ret_from_fork(void);
20 asmlinkage void ret_from_kernel_thread(void);
21
22
23
24
/*
 * flush_thread - reset per-thread state when a task exec()s.
 *
 * Intentionally empty: this architecture keeps no per-thread state that
 * needs flushing here.
 */
void flush_thread(void)
{
}
26
27
28
29
30 unsigned long thread_saved_pc(struct task_struct *tsk)
31 {
32 struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
33
34 return sw->r15;
35 }
36
/*
 * copy_thread_tls - set up the kernel stack and register state of a new task.
 * @clone_flags: clone(2) flags; CLONE_SETTLS is honored here
 * @usp: new user stack pointer; for kernel threads this argument is stashed
 *       in r9 instead (presumably the thread entry point, consumed by
 *       ret_from_kernel_thread — TODO confirm against entry code)
 * @kthread_arg: kernel-thread argument, stashed in r10
 * @p: the freshly-created task
 * @tls: new TLS value, applied when CLONE_SETTLS is set
 *
 * Returns 0 (this implementation cannot fail).
 */
int copy_thread_tls(unsigned long clone_flags,
		    unsigned long usp,
		    unsigned long kthread_arg,
		    struct task_struct *p,
		    unsigned long tls)
{
	struct switch_stack *childstack;
	struct pt_regs *childregs = task_pt_regs(p);

#ifdef CONFIG_CPU_HAS_FPU
	/* Save the live FPU state into the child's FP save area. */
	save_to_user_fp(&p->thread.user_fp);
#endif

	/* The switch_stack frame sits immediately below the pt_regs frame. */
	childstack = ((struct switch_stack *) childregs) - 1;
	memset(childstack, 0, sizeof(struct switch_stack));

	/* The first context switch into the child restores from this frame. */
	p->thread.ksp = (unsigned long)childstack;

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* Kernel thread: no user register context to inherit. */
		memset(childregs, 0, sizeof(struct pt_regs));
		/* First switch "returns" into ret_from_kernel_thread; r9/r10
		 * carry usp/kthread_arg across (see parameter notes above). */
		childstack->r15 = (unsigned long) ret_from_kernel_thread;
		childstack->r10 = kthread_arg;
		childstack->r9 = usp;
		childregs->sr = mfcr("psr");
	} else {
		/* User thread: start from a copy of the parent's registers. */
		*childregs = *(current_pt_regs());
		if (usp)
			childregs->usp = usp;
		if (clone_flags & CLONE_SETTLS)
			task_thread_info(p)->tp_value = childregs->tls
							= tls;

		/* Child sees 0 as the return value of fork/clone (a0). */
		childregs->a0 = 0;
		childstack->r15 = (unsigned long) ret_from_fork;
	}

	return 0;
}
76
77
78 int dump_fpu(struct pt_regs *regs, struct user_fp *fpu)
79 {
80 memcpy(fpu, ¤t->thread.user_fp, sizeof(*fpu));
81 return 1;
82 }
83 EXPORT_SYMBOL(dump_fpu);
84
/*
 * dump_task_regs - fill @pr_regs with @tsk's register state for an ELF
 * core dump.
 * @tsk: task whose registers are dumped
 * @pr_regs: destination ELF general-register set
 *
 * Returns 1 to indicate the registers were copied.
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *pr_regs)
{
	struct pt_regs *regs = task_pt_regs(tsk);

	/* No trailing semicolon here, matching the original invocation —
	 * presumably the macro expands to a complete statement. */
	ELF_CORE_COPY_REGS((*pr_regs), regs)

	return 1;
}
94
95 unsigned long get_wchan(struct task_struct *p)
96 {
97 unsigned long lr;
98 unsigned long *fp, *stack_start, *stack_end;
99 int count = 0;
100
101 if (!p || p == current || p->state == TASK_RUNNING)
102 return 0;
103
104 stack_start = (unsigned long *)end_of_stack(p);
105 stack_end = (unsigned long *)(task_stack_page(p) + THREAD_SIZE);
106
107 fp = (unsigned long *) thread_saved_fp(p);
108 do {
109 if (fp < stack_start || fp > stack_end)
110 return 0;
111 #ifdef CONFIG_STACKTRACE
112 lr = fp[1];
113 fp = (unsigned long *)fp[0];
114 #else
115 lr = *fp++;
116 #endif
117 if (!in_sched_functions(lr) &&
118 __kernel_text_address(lr))
119 return lr;
120 } while (count++ < 16);
121
122 return 0;
123 }
124 EXPORT_SYMBOL(get_wchan);
125
#ifndef CONFIG_CPU_PM_NONE
/*
 * arch_cpu_idle - put the CPU into its configured low-power idle state.
 *
 * Exactly one CONFIG_CPU_PM_* option is expected to select the idle
 * instruction (wait/doze/stop) — presumably enforced by Kconfig; if none
 * is set, this only re-enables interrupts.
 */
void arch_cpu_idle(void)
{
#ifdef CONFIG_CPU_PM_WAIT
	asm volatile("wait\n");
#endif

#ifdef CONFIG_CPU_PM_DOZE
	asm volatile("doze\n");
#endif

#ifdef CONFIG_CPU_PM_STOP
	asm volatile("stop\n");
#endif
	/* NOTE(review): the generic idle loop appears to expect IRQs enabled
	 * on return — confirm against this kernel version's idle contract. */
	local_irq_enable();
}
#endif