root/arch/um/kernel/process.c


DEFINITIONS

This source file includes the following definitions.
  1. external_pid
  2. pid_to_processor_id
  3. free_stack
  4. alloc_stack
  5. set_current
  6. __switch_to
  7. interrupt_end
  8. get_current_pid
  9. new_thread_handler
  10. fork_handler
  11. copy_thread_tls
  12. initial_thread_cb
  13. time_travel_sleep
  14. um_idle_sleep
  15. arch_cpu_idle
  16. __cant_sleep
  17. user_context
  18. do_uml_exitcalls
  19. uml_strdup
  20. copy_to_user_proc
  21. copy_from_user_proc
  22. clear_user_proc
  23. cpu
  24. set_using_sysemu
  25. get_using_sysemu
  26. sysemu_proc_show
  27. sysemu_proc_open
  28. sysemu_proc_write
  29. make_proc_sysemu
  30. singlestepping
  31. arch_align_stack
  32. get_wchan
  33. elf_core_copy_fpregs

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>
#include <timer-internal.h>

/*
 * This is a per-cpu array.  Each processor modifies and reads only its own
 * entry, so it's OK if another processor is concurrently updating a
 * different entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

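The comment above leans on a simple rule: each CPU touches only its own slot, so no locking is needed even though all slots live in one array. A minimal userspace sketch of the same pattern (hypothetical, using POSIX threads in place of UML's per-CPU indexing):

#include <pthread.h>
#include <stdio.h>

#define NSLOTS 4

/* One slot per worker, mirroring cpu_tasks[]: slot i is owned by worker i. */
static int slots[NSLOTS] = { [0 ... NSLOTS - 1] = -1 };

static void *worker(void *arg)
{
        int id = *(int *)arg;

        /* Only the owner ever writes its slot, so no lock is required. */
        slots[id] = id;
        return NULL;
}

int main(void)
{
        pthread_t tid[NSLOTS];
        int ids[NSLOTS];
        int i;

        for (i = 0; i < NSLOTS; i++) {
                ids[i] = i;
                pthread_create(&tid[i], NULL, worker, &ids[i]);
        }
        for (i = 0; i < NSLOTS; i++)
                pthread_join(tid[i], NULL);
        for (i = 0; i < NSLOTS; i++)
                printf("slot %d = %d\n", i, slots[i]);
        return 0;
}

Compile with gcc -pthread; the range designated initializer is the same GNU extension used in the definition above.
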
static inline int external_pid(void)
{
        /* FIXME: Need to look up userspace_pid by cpu */
        return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
        int i;

        for (i = 0; i < ncpus; i++) {
                if (cpu_tasks[i].pid == pid)
                        return i;
        }
        return -1;
}

void free_stack(unsigned long stack, int order)
{
        free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
        unsigned long page;
        gfp_t flags = GFP_KERNEL;

        if (atomic)
                flags = GFP_ATOMIC;
        page = __get_free_pages(flags, order);

        return page;
}

static inline void set_current(struct task_struct *task)
{
        cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
                { external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

void *__switch_to(struct task_struct *from, struct task_struct *to)
{
        to->thread.prev_sched = from;
        set_current(to);

        switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
        arch_switch_to(current);

        return current->thread.prev_sched;
}

void interrupt_end(void)
{
        struct pt_regs *regs = &current->thread.regs;

        if (need_resched())
                schedule();
        if (test_thread_flag(TIF_SIGPENDING))
                do_signal(regs);
        if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
                tracehook_notify_resume(regs);
}

int get_current_pid(void)
{
        return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
        int (*fn)(void *), n;
        void *arg;

        if (current->thread.prev_sched != NULL)
                schedule_tail(current->thread.prev_sched);
        current->thread.prev_sched = NULL;

        fn = current->thread.request.u.thread.proc;
        arg = current->thread.request.u.thread.arg;

        /*
         * callback returns only if the kernel thread execs a process
         */
        n = fn(arg);
        userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}

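new_thread_handler() is reached because new_thread() writes the handler's address and a fresh stack pointer straight into a jmp_buf, and switch_threads() longjmps into it so that the "return" lands in the handler. Poking raw jmp_buf registers is not portable; a rough userspace analogue of starting a function on a brand-new stack, sketched here with ucontext rather than UML's jmp_buf trick:

#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

#define STACK_SIZE (64 * 1024)

static ucontext_t main_ctx, thread_ctx;

/* Plays the role of new_thread_handler(): it runs on the new stack. */
static void thread_handler(void)
{
        printf("handler running on its own stack\n");
        swapcontext(&thread_ctx, &main_ctx);    /* like switch_threads() back */
}

int main(void)
{
        void *stack = malloc(STACK_SIZE);

        if (!stack)
                return 1;

        /* Analogue of new_thread(): bind a stack and an entry point. */
        getcontext(&thread_ctx);
        thread_ctx.uc_stack.ss_sp = stack;
        thread_ctx.uc_stack.ss_size = STACK_SIZE;
        thread_ctx.uc_link = &main_ctx;
        makecontext(&thread_ctx, thread_handler, 0);

        /* Analogue of switch_threads(): control "returns" into the handler. */
        swapcontext(&main_ctx, &thread_ctx);
        printf("back in the original context\n");
        free(stack);
        return 0;
}

In the file itself the jmp_buf variant is what copy_thread_tls() sets up below via new_thread().
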
/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
        force_flush_all();

        schedule_tail(current->thread.prev_sched);

        /*
         * XXX: if interrupt_end() calls schedule, this call to
         * arch_switch_to isn't needed. It might be worth applying this
         * to improve performance. -bb
         */
        arch_switch_to(current);

        current->thread.prev_sched = NULL;

        userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}

int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct * p, unsigned long tls)
{
        void (*handler)(void);
        int kthread = current->flags & PF_KTHREAD;
        int ret = 0;

        p->thread = (struct thread_struct) INIT_THREAD;

        if (!kthread) {
                memcpy(&p->thread.regs.regs, current_pt_regs(),
                       sizeof(p->thread.regs.regs));
                PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
                if (sp != 0)
                        REGS_SP(p->thread.regs.regs.gp) = sp;

                handler = fork_handler;

                arch_copy_thread(&current->thread.arch, &p->thread.arch);
        } else {
                get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
                p->thread.request.u.thread.proc = (int (*)(void *))sp;
                p->thread.request.u.thread.arg = (void *)arg;
                handler = new_thread_handler;
        }

        new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

        if (!kthread) {
                clear_flushed_tls(p);

                /*
                 * Set a new TLS for the child thread?
                 */
                if (clone_flags & CLONE_SETTLS)
                        ret = arch_set_tls(p, tls);
        }

        return ret;
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
        int save_kmalloc_ok = kmalloc_ok;

        kmalloc_ok = 0;
        initial_thread_cb_skas(proc, arg);
        kmalloc_ok = save_kmalloc_ok;
}

static void time_travel_sleep(unsigned long long duration)
{
        unsigned long long next = time_travel_time + duration;

        if (time_travel_mode != TT_MODE_INFCPU)
                os_timer_disable();

        while (time_travel_timer_mode == TT_TMR_PERIODIC &&
               time_travel_timer_expiry < time_travel_time)
                time_travel_set_timer_expiry(time_travel_timer_expiry +
                                             time_travel_timer_interval);

        if (time_travel_timer_mode != TT_TMR_DISABLED &&
            time_travel_timer_expiry < next) {
                if (time_travel_timer_mode == TT_TMR_ONESHOT)
                        time_travel_set_timer_mode(TT_TMR_DISABLED);
                /*
                 * In basic mode, time_travel_time will be adjusted in
                 * the timer IRQ handler so it works even when the signal
                 * comes from the OS timer, see there.
                 */
                if (time_travel_mode != TT_MODE_BASIC)
                        time_travel_set_time(time_travel_timer_expiry);

                deliver_alarm();
        } else {
                time_travel_set_time(next);
        }

        if (time_travel_mode != TT_MODE_INFCPU) {
                if (time_travel_timer_mode == TT_TMR_PERIODIC)
                        os_timer_set_interval(time_travel_timer_interval);
                else if (time_travel_timer_mode == TT_TMR_ONESHOT)
                        os_timer_one_shot(time_travel_timer_expiry - next);
        }
}

static void um_idle_sleep(void)
{
        unsigned long long duration = UM_NSEC_PER_SEC;

        if (time_travel_mode != TT_MODE_OFF) {
                time_travel_sleep(duration);
        } else {
                os_idle_sleep(duration);
        }
}

void arch_cpu_idle(void)
{
        cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
        um_idle_sleep();
        local_irq_enable();
}

int __cant_sleep(void) {
        return in_atomic() || irqs_disabled() || in_interrupt();
        /* Is in_interrupt() really needed? */
}

int user_context(unsigned long sp)
{
        unsigned long stack;

        stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
        return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
        exitcall_t *call;

        call = &__uml_exitcall_end;
        while (--call >= &__uml_exitcall_begin)
                (*call)();
}

char *uml_strdup(const char *string)
{
        return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
        return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
        return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
        return clear_user(buf, size);
}

int cpu(void)
{
        return current_thread_info()->cpu;
}

static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
        if (value > sysemu_supported)
                return;
        atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
        return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%d\n", get_using_sysemu());
        return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *pos)
{
        char tmp[2];

        if (copy_from_user(tmp, buf, 1))
                return -EFAULT;

        if (tmp[0] >= '0' && tmp[0] <= '2')
                set_using_sysemu(tmp[0] - '0');
        /* We use the first char, but pretend to write everything */
        return count;
}

static const struct file_operations sysemu_proc_fops = {
        .owner          = THIS_MODULE,
        .open           = sysemu_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
        .write          = sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
        struct proc_dir_entry *ent;
        if (!sysemu_supported)
                return 0;

        ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);

        if (ent == NULL)
        {
                printk(KERN_WARNING "Failed to register /proc/sysemu\n");
                return 0;
        }

        return 0;
}

late_initcall(make_proc_sysemu);

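Once make_proc_sysemu() has registered the file, the sysemu level can be inspected and changed from userspace. sysemu_proc_write() only examines the first character but claims the whole buffer as consumed, so a longer write such as "2\n" still succeeds. A small sketch of driving it (assuming the UML instance reports sysemu support and the caller is root, since the file is mode 0600):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        char buf[8] = "";
        int fd = open("/proc/sysemu", O_WRONLY);

        if (fd < 0) {
                perror("open /proc/sysemu");
                return 1;
        }
        /* Only the leading '2' is used; the trailing newline is accepted too. */
        if (write(fd, "2\n", 2) < 0)
                perror("write");
        close(fd);

        fd = open("/proc/sysemu", O_RDONLY);
        if (fd >= 0 && read(fd, buf, sizeof(buf) - 1) > 0)
                printf("sysemu level now: %s", buf);
        if (fd >= 0)
                close(fd);
        return 0;
}
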
int singlestepping(void * t)
{
        struct task_struct *task = t ? t : current;

        if (!(task->ptrace & PT_DTRACE))
                return 0;

        if (task->thread.singlestep_syscall)
                return 1;

        return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/exec.h.
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}
#endif

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long stack_page, sp, ip;
        bool seen_sched = 0;

        if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
                return 0;

        stack_page = (unsigned long) task_stack_page(p);
        /* Bail if the process has no kernel stack for some reason */
        if (stack_page == 0)
                return 0;

        sp = p->thread.switch_buf->JB_SP;
        /*
         * Bail if the stack pointer is below the bottom of the kernel
         * stack for some reason
         */
        if (sp < stack_page)
                return 0;

        while (sp < stack_page + THREAD_SIZE) {
                ip = *((unsigned long *) sp);
                if (in_sched_functions(ip))
                        /* Ignore everything until we're above the scheduler */
                        seen_sched = 1;
                else if (kernel_text_address(ip) && seen_sched)
                        return ip;

                sp += sizeof(unsigned long);
        }

        return 0;
}

int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
        int cpu = current_thread_info()->cpu;

        return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
}

