This source file includes following definitions.
- SYSCALL_DEFINE1
- SYSCALL_DEFINE0
- SYSCALL_DEFINE3
- arch_cpu_idle
- arch_cpu_idle
- arch_cpu_idle
- copy_thread
- start_thread
- flush_thread
- dump_fpu
- elf_check_arch
1
2
3
4
5
6
7
8 #include <linux/errno.h>
9 #include <linux/module.h>
10 #include <linux/sched.h>
11 #include <linux/sched/task.h>
12 #include <linux/sched/task_stack.h>
13
14 #include <linux/mm.h>
15 #include <linux/fs.h>
16 #include <linux/unistd.h>
17 #include <linux/ptrace.h>
18 #include <linux/slab.h>
19 #include <linux/syscalls.h>
20 #include <linux/elf.h>
21 #include <linux/tick.h>
22
/*
 * sys_arc_settls: set the calling task's Thread Local Storage pointer.
 *
 * The value is stashed in thread_info and read back by arc_gettls;
 * copy_thread() propagates it to children (and mirrors it into r25).
 */
SYSCALL_DEFINE1(arc_settls, void *, user_tls_data_ptr)
{
	task_thread_info(current)->thr_ptr = (unsigned int)user_tls_data_ptr;
	return 0;	/* always succeeds */
}
28
29
30
31
32
33
34
35
36
37
38
/*
 * sys_arc_gettls: return the TLS pointer previously installed via
 * arc_settls (or inherited from the parent in copy_thread).
 */
SYSCALL_DEFINE0(arc_gettls)
{
	return task_thread_info(current)->thr_ptr;
}
43
44 SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
45 {
46 struct pt_regs *regs = current_pt_regs();
47 u32 uval;
48 int ret;
49
50
51
52
53
54
55
56 WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
57
58
59 regs->status32 &= ~STATUS_Z_MASK;
60
61 ret = access_ok(uaddr, sizeof(*uaddr));
62 if (!ret)
63 goto fail;
64
65 again:
66 preempt_disable();
67
68 ret = __get_user(uval, uaddr);
69 if (ret)
70 goto fault;
71
72 if (uval != expected)
73 goto out;
74
75 ret = __put_user(new, uaddr);
76 if (ret)
77 goto fault;
78
79 regs->status32 |= STATUS_Z_MASK;
80
81 out:
82 preempt_enable();
83 return uval;
84
85 fault:
86 preempt_enable();
87
88 if (unlikely(ret != -EFAULT))
89 goto fail;
90
91 down_read(¤t->mm->mmap_sem);
92 ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr,
93 FAULT_FLAG_WRITE, NULL);
94 up_read(¤t->mm->mmap_sem);
95
96 if (likely(!ret))
97 goto again;
98
99 fail:
100 force_sig(SIGSEGV);
101 return ret;
102 }
103
104 #ifdef CONFIG_ISA_ARCV2
105
/*
 * ARCv2 idle: execute SLEEP with an immediate operand.  Bit 4 (0x10)
 * requests wakeup with interrupts enabled; the low bits give the
 * interrupt priority threshold — presumably per the ARCv2 SLEEP
 * encoding, TODO confirm against the ARCv2 PRM.
 */
void arch_cpu_idle(void)
{
	/* sleep with interrupts enabled at the default priority level */
	const unsigned int arg = 0x10 | ARCV2_IRQ_DEF_PRIO;

	__asm__ __volatile__(
		"sleep %0	\n"
		:
		:"I"(arg));	/* "I": must be a compile-time immediate */
}
116
117 #elif defined(CONFIG_EZNPS_MTM_EXT)
118
/*
 * EZchip NPS idle with the MTM (multi-thread management) extension:
 * the custom instruction has no assembler mnemonic, so its raw opcode
 * (CTOP_INST_HWSCHD_WFT_IE12) is emitted via .word.
 */
void arch_cpu_idle(void)
{
	__asm__ __volatile__(
		".word %0	\n"
		:
		:"i"(CTOP_INST_HWSCHD_WFT_IE12));
}
127
128 #else
129
/*
 * Default (ARCompact) idle: SLEEP with operand 0x3 — presumably
 * re-enables interrupts on wakeup; TODO confirm against the
 * ARCompact PRM.
 */
void arch_cpu_idle(void)
{
	__asm__ __volatile__("sleep 0x3	\n");
}
135
136 #endif
137
138 asmlinkage void ret_from_fork(void);
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
/*
 * Set up the register state and kernel stack for a new task.
 *
 * Layout carved on the child's kernel stack, top down:
 *   pt_regs | FP, BLINK (2 words for __switch_to unwind) | callee_regs
 *
 * For kernel threads, usp is the entry function and kthread_arg its
 * argument, smuggled to ret_from_fork via callee regs r14/r13.
 */
int copy_thread(unsigned long clone_flags,
		unsigned long usp, unsigned long kthread_arg,
		struct task_struct *p)
{
	struct pt_regs *c_regs;			/* child's pt_regs */
	unsigned long *childksp;		/* FP/BLINK slots for __switch_to */
	struct callee_regs *c_callee;		/* child's callee-saved regs */
	struct callee_regs *parent_callee;	/* parent's callee-saved regs */
	struct pt_regs *regs = current_pt_regs();

	/* Anchor the three frames on the child's kernel stack (top down) */
	c_regs = task_pt_regs(p);
	childksp = (unsigned long *)c_regs - 2;		/* FP and BLINK slots */
	c_callee = ((struct callee_regs *)childksp) - 1;

	/*
	 * __switch_to() starts unwinding the new task from thread.ksp,
	 * popping the callee_regs frame first, then FP and BLINK.
	 */
	p->thread.ksp = (unsigned long)c_callee;	/* THREAD_KSP */

	/* __switch_to() expects FP(0) and BLINK (return address) on top */
	childksp[0] = 0;				/* fp */
	childksp[1] = (unsigned long)ret_from_fork;	/* blink */

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* Kernel thread: no user register context to inherit */
		memset(c_regs, 0, sizeof(struct pt_regs));

		/*
		 * Entry point and argument ride in callee regs so the
		 * __switch_to unwind hands them to ret_from_fork —
		 * presumably it moves them into place; TODO confirm
		 * against the entry code.
		 */
		c_callee->r13 = kthread_arg;
		c_callee->r14 = usp;	/* function to call */

		return 0;
	}

	/*--------- User Task Only --------------*/

	/*
	 * NOTE(review): redundant — identical stores already done above,
	 * before the PF_KTHREAD check; left byte-identical here.
	 */
	childksp[0] = 0;				/* fp */
	childksp[1] = (unsigned long)ret_from_fork;	/* blink */

	/* Child starts life with a copy of the parent's user pt_regs */
	*c_regs = *regs;

	/* New user stack, if the clone caller supplied one */
	if (usp)
		c_regs->sp = usp;

	c_regs->r0 = 0;		/* fork()/clone() returns 0 in the child */

	/* Parent's callee regs sit just below its pt_regs on its stack */
	parent_callee = ((struct callee_regs *)regs) - 1;
	*c_callee = *parent_callee;

	if (unlikely(clone_flags & CLONE_SETTLS)) {
		/*
		 * CLONE_SETTLS: the new TLS pointer arrives in r3 —
		 * presumably per the ARC clone() ABI; TODO confirm
		 * against the syscall entry code.
		 */
		task_thread_info(p)->thr_ptr = regs->r3;
	} else {
		/* Plain fork: child inherits the parent's TLS pointer */
		task_thread_info(p)->thr_ptr =
			task_thread_info(current)->thr_ptr;
	}

	/* Mirror the TLS pointer into callee reg r25 for the child */
	c_callee->r25 = task_thread_info(p)->thr_ptr;

#ifdef CONFIG_ARC_CURR_IN_REG
	/*
	 * When r25 is reserved in-kernel (for "current"), the user-mode
	 * r25 value is kept in pt_regs and restored on return to user.
	 */
	c_regs->user_r25 = c_callee->r25;
#endif

	return 0;
}
263
264
265
266
/*
 * Prepare the user register state for exec(): start executing at @pc
 * with user stack pointer @usp.
 */
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long usp)
{
	regs->sp = usp;
	regs->ret = pc;	/* ret holds the address returned to on exception exit */

	/*
	 * Initial status32: U (user mode) | L (zero-overhead loops
	 * inhibited until an LP instruction runs — presumably; confirm
	 * against the ARC PRM) | per-ISA init bits.
	 */
	regs->status32 = STATUS_U_MASK | STATUS_L_MASK | ISA_INIT_STATUS_BITS;

#ifdef CONFIG_EZNPS_MTM_EXT
	regs->eflags = 0;	/* NPS-specific extension flags start clear */
#endif

	/* Seed values for the ZOL loop registers — presumably bogus
	 * markers to aid debugging; TODO confirm */
	regs->lp_start = 0x10;
	regs->lp_end = 0x80;
}
287
288
289
290
/*
 * Arch hook called on exec() to reset per-thread CPU state.
 * Nothing to do on ARC.
 */
void flush_thread(void)
{
}
294
/*
 * Core-dump hook for FPU register state.  Always returns 0: no FPU
 * context is reported, so @fpu is left untouched.
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	return 0;
}
299
300 int elf_check_arch(const struct elf32_hdr *x)
301 {
302 unsigned int eflags;
303
304 if (x->e_machine != EM_ARC_INUSE) {
305 pr_err("ELF not built for %s ISA\n",
306 is_isa_arcompact() ? "ARCompact":"ARCv2");
307 return 0;
308 }
309
310 eflags = x->e_flags;
311 if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) {
312 pr_err("ABI mismatch - you need newer toolchain\n");
313 force_sigsegv(SIGSEGV);
314 return 0;
315 }
316
317 return 1;
318 }
319 EXPORT_SYMBOL(elf_check_arch);