/*
 * This file handles the architecture dependent parts of process handling.
 *
 * Copyright IBM Corp. 1999, 2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Hartmut Penner <hp@de.ibm.com>,
 *	      Denis Joseph Barrow
 */

#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/kprobes.h>
#include <linux/random.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/vtimer.h>
#include <asm/exec.h>
#include <asm/irq.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include "entry.h"

asmlinkage void ret_from_fork(void) asm ("ret_from_fork");

/*
 * Return the saved PC of a blocked thread. Used in kernel/sched.
 * resume in entry.S does not create a new stack frame, it
 * just stores the registers %r6-%r15 to the frame given by
 * schedule. We want to return the address of the caller of
 * schedule, so we have to walk the backchain one time to
 * find the frame where schedule() stored its return address.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct stack_frame *sf, *low, *high;

	if (!tsk || !task_stack_page(tsk))
		return 0;
	low = task_stack_page(tsk);
	high = (struct stack_frame *) task_pt_regs(tsk);
	sf = (struct stack_frame *) (tsk->thread.ksp & PSW_ADDR_INSN);
	if (sf <= low || sf > high)
		return 0;
	sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN);
	if (sf <= low || sf > high)
		return 0;
	return sf->gprs[8];
}

extern void kernel_thread_starter(void);

/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	exit_thread_runtime_instr();
}

void flush_thread(void)
{
}

void release_thread(struct task_struct *dead_task)
{
}

void arch_release_task_struct(struct task_struct *tsk)
{
	if (tsk->thread.vxrs)
		kfree(tsk->thread.vxrs);
}

int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
		unsigned long arg, struct task_struct *p)
{
	struct thread_info *ti;
	struct fake_frame
	{
		struct stack_frame sf;
		struct pt_regs childregs;
	} *frame;

	frame = container_of(task_pt_regs(p), struct fake_frame, childregs);
	p->thread.ksp = (unsigned long) frame;
	/* Save access registers to new thread structure. */
	save_access_regs(&p->thread.acrs[0]);
	/* start new process with ar4 pointing to the correct address space */
	p->thread.mm_segment = get_fs();
	/* Don't copy debug registers */
	memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
	memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
	clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
	/* Initialize per thread user and system timer values */
	ti = task_thread_info(p);
	ti->user_timer = 0;
	ti->system_timer = 0;

	frame->sf.back_chain = 0;
	/* new return point is ret_from_fork */
	frame->sf.gprs[8] = (unsigned long) ret_from_fork;
	/* fake return stack for resume(), don't go back to schedule */
	frame->sf.gprs[9] = (unsigned long) frame;

	/* Store access registers to kernel stack of new process. */
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(&frame->childregs, 0, sizeof(struct pt_regs));
		frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
				PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
		frame->childregs.psw.addr = PSW_ADDR_AMODE |
				(unsigned long) kernel_thread_starter;
		frame->childregs.gprs[9] = new_stackp; /* function */
		frame->childregs.gprs[10] = arg;
		frame->childregs.gprs[11] = (unsigned long) do_exit;
		frame->childregs.orig_gpr2 = -1;

		return 0;
	}
	frame->childregs = *current_pt_regs();
	frame->childregs.gprs[2] = 0;	/* child returns 0 on fork. */
	frame->childregs.flags = 0;
	if (new_stackp)
		frame->childregs.gprs[15] = new_stackp;

	/* Don't copy runtime instrumentation info */
	p->thread.ri_cb = NULL;
	p->thread.ri_signum = 0;
	frame->childregs.psw.mask &= ~PSW_MASK_RI;

	/* Save the fpu registers to new thread structure. */
	save_fp_ctl(&p->thread.fp_regs.fpc);
	save_fp_regs(p->thread.fp_regs.fprs);
	p->thread.fp_regs.pad = 0;
	p->thread.vxrs = NULL;
	/* Set a new TLS ? */
	if (clone_flags & CLONE_SETTLS) {
		unsigned long tls = frame->childregs.gprs[6];
		if (is_compat_task()) {
			p->thread.acrs[0] = (unsigned int)tls;
		} else {
			p->thread.acrs[0] = (unsigned int)(tls >> 32);
			p->thread.acrs[1] = (unsigned int)tls;
		}
	}
	return 0;
}

asmlinkage void execve_tail(void)
{
	current->thread.fp_regs.fpc = 0;
	asm volatile("sfpc %0" : : "d" (0));
}

/*
 * Fill in the FPU structure for a core dump.
 */
int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
{
	save_fp_ctl(&fpregs->fpc);
	save_fp_regs(fpregs->fprs);
	return 1;
}
EXPORT_SYMBOL(dump_fpu);

unsigned long get_wchan(struct task_struct *p)
{
	struct stack_frame *sf, *low, *high;
	unsigned long return_address;
	int count;

	if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
		return 0;
	low = task_stack_page(p);
	high = (struct stack_frame *) task_pt_regs(p);
	sf = (struct stack_frame *) (p->thread.ksp & PSW_ADDR_INSN);
	if (sf <= low || sf > high)
		return 0;
	for (count = 0; count < 16; count++) {
		sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN);
		if (sf <= low || sf > high)
			return 0;
		return_address = sf->gprs[8] & PSW_ADDR_INSN;
		if (!in_sched_functions(return_address))
			return return_address;
	}
	return 0;
}

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
	else
		return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long ret;

	ret = PAGE_ALIGN(mm->brk + brk_rnd());
	return (ret > mm->brk) ? ret : mm->brk;
}