/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Derived from "arch/i386/kernel/signal.c"
 * Copyright (C) 1991, 1992 Linus Torvalds
 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>

#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>

#include "signal.h"


#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
#define FP_REGS_SIZE	sizeof(elf_fpregset_t)

#define TRAMP_TRACEBACK	3
#define TRAMP_SIZE	6

/*
 * When we have signals to deliver, we set up on the user stack,
 * going down from the original stack pointer:
 *	1) a rt_sigframe struct which contains the ucontext
 *	2) a gap of __SIGNAL_FRAMESIZE bytes which acts as a dummy caller
 *	   frame for the signal handler.
 */

struct rt_sigframe {
	/* sys_rt_sigreturn requires the ucontext be the first field */
	struct ucontext uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext uc_transact;
#endif
	unsigned long _unused[2];
	unsigned int tramp[TRAMP_SIZE];
	struct siginfo __user *pinfo;
	void __user *puc;
	struct siginfo info;
	/* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */
	char abigap[USER_REDZONE_SIZE];
} __attribute__ ((aligned (16)));
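
/*
 * The resulting user stack, sketched roughly for orientation (the stack
 * grows down; exact placement comes from get_sigframe()):
 *
 *	higher addresses
 *	  old GPR1			(interrupted code's stack pointer)
 *	  ...
 *	  struct rt_sigframe		("frame": uc first, abigap last)
 *	  __SIGNAL_FRAMESIZE gap	(dummy caller frame)
 *	  new GPR1			(back chain word points at old GPR1)
 *	lower addresses
 */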
static const char fmt32[] = KERN_INFO \
	"%s[%d]: bad frame in %s: %08lx nip %08lx lr %08lx\n";
static const char fmt64[] = KERN_INFO \
	"%s[%d]: bad frame in %s: %016lx nip %016lx lr %016lx\n";

/*
 * Set up the sigcontext for the signal frame.
 */

static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
		 int signr, sigset_t *set, unsigned long handler,
		 int ctx_has_vsx_region)
{
	/* When CONFIG_ALTIVEC is set, we _always_ set up v_regs even if the
	 * process has never used altivec yet (MSR_VEC is zero in pt_regs of
	 * the context). This is very important because we must ensure we
	 * don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation.
	 * Userland shall check AT_HWCAP to know whether it can rely on the
	 * v_regs pointer or not.
	 */
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *)(((unsigned long)sc->vmx_reserve + 15) & ~0xful);
#endif
	unsigned long msr = regs->msr;
	long err = 0;

#ifdef CONFIG_ALTIVEC
	err |= __put_user(v_regs, &sc->v_regs);

	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
		err |= __copy_to_user(v_regs, &current->thread.vr_state,
				      33 * sizeof(vector128));
		/* set MSR_VEC in the MSR value in the frame to indicate that
		 * sc->v_regs contains valid data.
		 */
		msr |= MSR_VEC;
	}
	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
#else /* CONFIG_ALTIVEC */
	err |= __put_user(0, &sc->v_regs);
#endif /* CONFIG_ALTIVEC */
	flush_fp_to_thread(current);
	/* copy fpr regs and fpscr */
	err |= copy_fpr_to_user(&sc->fp_regs, current);

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSX low doubleword to local buffer for formatting,
	 * then out to userspace. Update v_regs to point after the
	 * VMX data.
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		__giveup_vsx(current);
		v_regs += ELF_NVRREG;
		err |= copy_vsx_to_user(v_regs, current);
		/* set MSR_VSX in the MSR value in the frame to
		 * indicate that sc->vs_regs contains valid data.
		 */
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
	err |= __put_user(&sc->gp_regs, &sc->regs);
	WARN_ON(!FULL_REGS(regs));
	err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE);
	err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
	err |= __put_user(signr, &sc->signal);
	err |= __put_user(handler, &sc->handler);
	if (set != NULL)
		err |= __put_user(set->sig[0], &sc->oldmask);

	return err;
}
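
/*
 * For orientation only, not kernel code: a userland handler has to go
 * through the sc->v_regs pointer rather than assume a fixed offset,
 * because the VMX area is 16-byte aligned inside vmx_reserve above.
 * Using the kernel's constant names for illustration:
 *
 *	struct sigcontext *sc = &uc->uc_mcontext;
 *	elf_vrreg_t *v = (elf_vrreg_t *)sc->v_regs;
 *	if (v != NULL && (sc->gp_regs[PT_MSR] & MSR_VEC))
 *		...			vr0..31 and vscr are v[0..32],
 *					vrsave is the word at &v[33]
 *
 * AT_HWCAP tells userland whether v_regs can be relied upon at all.
 */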
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * As above, but Transactional Memory is in use, so deliver sigcontexts
 * containing checkpointed and transactional register states.
 *
 * To do this, we treclaim (done before entering here) to gather both sets of
 * registers and set up the 'normal' sigcontext registers with rolled-back
 * register values such that a simple signal handler sees a correct
 * checkpointed register state. If interested, a TM-aware sighandler can
 * examine the transactional registers in the 2nd sigcontext to determine the
 * real origin of the signal.
 */
static long setup_tm_sigcontexts(struct sigcontext __user *sc,
				 struct sigcontext __user *tm_sc,
				 struct pt_regs *regs,
				 int signr, sigset_t *set, unsigned long handler)
{
	/* When CONFIG_ALTIVEC is set, we _always_ set up v_regs even if the
	 * process has never used altivec yet (MSR_VEC is zero in pt_regs of
	 * the context). This is very important because we must ensure we
	 * don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation.
	 * Userland shall check AT_HWCAP to know whether it can rely on the
	 * v_regs pointer or not.
	 */
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *)
		(((unsigned long)sc->vmx_reserve + 15) & ~0xful);
	elf_vrreg_t __user *tm_v_regs = (elf_vrreg_t __user *)
		(((unsigned long)tm_sc->vmx_reserve + 15) & ~0xful);
#endif
	unsigned long msr = regs->msr;
	long err = 0;

	BUG_ON(!MSR_TM_ACTIVE(regs->msr));

	/* Remove TM bits from thread's MSR. The MSR in the sigcontext
	 * just indicates to userland that we were doing a transaction, but we
	 * don't want to return in transactional state. This also ensures
	 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
	 */
	regs->msr &= ~MSR_TS_MASK;

	flush_fp_to_thread(current);

#ifdef CONFIG_ALTIVEC
	err |= __put_user(v_regs, &sc->v_regs);
	err |= __put_user(tm_v_regs, &tm_sc->v_regs);

	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
		err |= __copy_to_user(v_regs, &current->thread.vr_state,
				      33 * sizeof(vector128));
		/* If VEC was enabled there are transactional VRs valid too,
		 * else they're a copy of the checkpointed VRs.
		 */
		if (msr & MSR_VEC)
			err |= __copy_to_user(tm_v_regs,
					      &current->thread.transact_vr,
					      33 * sizeof(vector128));
		else
			err |= __copy_to_user(tm_v_regs,
					      &current->thread.vr_state,
					      33 * sizeof(vector128));

		/* set MSR_VEC in the MSR value in the frame to indicate
		 * that sc->v_regs contains valid data.
		 */
		msr |= MSR_VEC;
	}
	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
	if (msr & MSR_VEC)
		err |= __put_user(current->thread.transact_vrsave,
				  (u32 __user *)&tm_v_regs[33]);
	else
		err |= __put_user(current->thread.vrsave,
				  (u32 __user *)&tm_v_regs[33]);

#else /* CONFIG_ALTIVEC */
	err |= __put_user(0, &sc->v_regs);
	err |= __put_user(0, &tm_sc->v_regs);
#endif /* CONFIG_ALTIVEC */

	/* copy fpr regs and fpscr */
	err |= copy_fpr_to_user(&sc->fp_regs, current);
	if (msr & MSR_FP)
		err |= copy_transact_fpr_to_user(&tm_sc->fp_regs, current);
	else
		err |= copy_fpr_to_user(&tm_sc->fp_regs, current);

#ifdef CONFIG_VSX
	/*
	 * Copy VSX low doubleword to local buffer for formatting,
	 * then out to userspace. Update v_regs to point after the
	 * VMX data.
	 */
	if (current->thread.used_vsr) {
		__giveup_vsx(current);
		v_regs += ELF_NVRREG;
		tm_v_regs += ELF_NVRREG;

		err |= copy_vsx_to_user(v_regs, current);

		if (msr & MSR_VSX)
			err |= copy_transact_vsx_to_user(tm_v_regs, current);
		else
			err |= copy_vsx_to_user(tm_v_regs, current);

		/* set MSR_VSX in the MSR value in the frame to
		 * indicate that sc->vs_regs contains valid data.
		 */
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */

	err |= __put_user(&sc->gp_regs, &sc->regs);
	err |= __put_user(&tm_sc->gp_regs, &tm_sc->regs);
	WARN_ON(!FULL_REGS(regs));
	err |= __copy_to_user(&tm_sc->gp_regs, regs, GP_REGS_SIZE);
	err |= __copy_to_user(&sc->gp_regs,
			      &current->thread.ckpt_regs, GP_REGS_SIZE);
	err |= __put_user(msr, &tm_sc->gp_regs[PT_MSR]);
	err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
	err |= __put_user(signr, &sc->signal);
	err |= __put_user(handler, &sc->handler);
	if (set != NULL)
		err |= __put_user(set->sig[0], &sc->oldmask);

	return err;
}
#endif
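
/*
 * A sketch of the userland view, not kernel code: when a signal
 * interrupts an active transaction, the ucontext handed to the handler
 * carries the checkpointed state and its uc_link points at the second,
 * transactional ucontext built above (uc_link is 0 otherwise):
 *
 *	void handler(int sig, siginfo_t *info, void *ctx)
 *	{
 *		ucontext_t *uc = ctx;			checkpointed regs
 *		ucontext_t *uc_tm = uc->uc_link;	transactional regs
 *		...
 *	}
 */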
/*
 * Restore the sigcontext from the signal frame.
 */

static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
			      struct sigcontext __user *sc)
{
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs;
#endif
	unsigned long err = 0;
	unsigned long save_r13 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	/* If this is not a signal return, we preserve the TLS in r13 */
	if (!sig)
		save_r13 = regs->gpr[13];

	/* copy the GPRs */
	err |= __copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr));
	err |= __get_user(regs->nip, &sc->gp_regs[PT_NIP]);
	/* get MSR separately, transfer the LE bit if doing signal return */
	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
	err |= __get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3]);
	err |= __get_user(regs->ctr, &sc->gp_regs[PT_CTR]);
	err |= __get_user(regs->link, &sc->gp_regs[PT_LNK]);
	err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]);
	err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]);
	/* skip SOFTE */
	regs->trap = 0;
	err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
	err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
	err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);

	if (!sig)
		regs->gpr[13] = save_r13;
	if (set != NULL)
		err |= __get_user(set->sig[0], &sc->oldmask);

	/*
	 * Do this before updating the thread state in
	 * current->thread.fpr/vr. That way, if we get preempted
	 * and another task grabs the FPU/Altivec, it won't be
	 * tempted to save the current CPU state into the thread_struct
	 * and corrupt what we are writing there.
	 */
	discard_lazy_cpu_state();

	/*
	 * Force reload of FP/VEC.
	 * This has to be done before copying stuff into current->thread.fpr/vr
	 * for the reasons explained in the previous comment.
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);

#ifdef CONFIG_ALTIVEC
	err |= __get_user(v_regs, &sc->v_regs);
	if (err)
		return err;
	if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
	if (v_regs != NULL && (msr & MSR_VEC) != 0)
		err |= __copy_from_user(&current->thread.vr_state, v_regs,
					33 * sizeof(vector128));
	else if (current->thread.used_vr)
		memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
	/* Always get VRSAVE back */
	if (v_regs != NULL)
		err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
	else
		current->thread.vrsave = 0;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	/* restore floating point */
	err |= copy_fpr_from_user(current, &sc->fp_regs);
#ifdef CONFIG_VSX
	/*
	 * Get additional VSX data. Update v_regs to point after the
	 * VMX data. Copy VSX low doubleword from userspace to local
	 * buffer for formatting, then into the taskstruct.
	 */
	v_regs += ELF_NVRREG;
	if ((msr & MSR_VSX) != 0)
		err |= copy_vsx_from_user(current, v_regs);
	else
		for (i = 0; i < 32; i++)
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif
	return err;
}
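
/*
 * Note that restore_sigcontext() is also called with sig == 0 from
 * sys_swapcontext() below; in that case r13 (the TLS pointer) is
 * preserved and the MSR_LE bit is deliberately not taken from the
 * context, as the code above shows.
 */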
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the two sigcontexts from the frame of a transactional process.
 */

static long restore_tm_sigcontexts(struct pt_regs *regs,
				   struct sigcontext __user *sc,
				   struct sigcontext __user *tm_sc)
{
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs, *tm_v_regs;
#endif
	unsigned long err = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif
	/* copy the GPRs */
	err |= __copy_from_user(regs->gpr, tm_sc->gp_regs, sizeof(regs->gpr));
	err |= __copy_from_user(&current->thread.ckpt_regs, sc->gp_regs,
				sizeof(regs->gpr));

	/*
	 * TFHAR is restored from the checkpointed 'wound-back' ucontext's NIP.
	 * TEXASR was set by the signal delivery reclaim, as was TFIAR.
	 * Users doing anything abhorrent like thread-switching w/ signals for
	 * TM-Suspended code will have to back TEXASR/TFIAR up themselves.
	 * For the case of getting a signal and simply returning from it,
	 * we don't need to re-copy them here.
	 */
	err |= __get_user(regs->nip, &tm_sc->gp_regs[PT_NIP]);
	err |= __get_user(current->thread.tm_tfhar, &sc->gp_regs[PT_NIP]);

	/* get MSR separately, transfer the LE bit if doing signal return */
	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
	/* Don't allow reserved mode. */
	if (MSR_TM_RESV(msr))
		return -EINVAL;
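	/*
	 * (For reference, assuming the usual Power ISA encoding of the two
	 * MSR[TS] bits: 00 is non-transactional, 01 suspended, 10
	 * transactional, and 11 is the reserved value rejected here.)
	 */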

	/* pull in MSR TM from user context */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);

	/* pull in MSR LE from user context */
	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

	/* The following non-GPR non-FPR non-VR state is also checkpointed: */
	err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]);
	err |= __get_user(regs->link, &tm_sc->gp_regs[PT_LNK]);
	err |= __get_user(regs->xer, &tm_sc->gp_regs[PT_XER]);
	err |= __get_user(regs->ccr, &tm_sc->gp_regs[PT_CCR]);
	err |= __get_user(current->thread.ckpt_regs.ctr,
			  &sc->gp_regs[PT_CTR]);
	err |= __get_user(current->thread.ckpt_regs.link,
			  &sc->gp_regs[PT_LNK]);
	err |= __get_user(current->thread.ckpt_regs.xer,
			  &sc->gp_regs[PT_XER]);
	err |= __get_user(current->thread.ckpt_regs.ccr,
			  &sc->gp_regs[PT_CCR]);

	/* These regs are not checkpointed; they can go in 'regs'. */
	err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
	err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
	err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
	err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);

	/*
	 * Do this before updating the thread state in
	 * current->thread.fpr/vr. That way, if we get preempted
	 * and another task grabs the FPU/Altivec, it won't be
	 * tempted to save the current CPU state into the thread_struct
	 * and corrupt what we are writing there.
	 */
	discard_lazy_cpu_state();

	/*
	 * Force reload of FP/VEC.
	 * This has to be done before copying stuff into current->thread.fpr/vr
	 * for the reasons explained in the previous comment.
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);

#ifdef CONFIG_ALTIVEC
	err |= __get_user(v_regs, &sc->v_regs);
	err |= __get_user(tm_v_regs, &tm_sc->v_regs);
	if (err)
		return err;
	if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	if (tm_v_regs && !access_ok(VERIFY_READ,
				    tm_v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
	if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) {
		err |= __copy_from_user(&current->thread.vr_state, v_regs,
					33 * sizeof(vector128));
		err |= __copy_from_user(&current->thread.transact_vr, tm_v_regs,
					33 * sizeof(vector128));
	} else if (current->thread.used_vr) {
		memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
		memset(&current->thread.transact_vr, 0, 33 * sizeof(vector128));
	}
	/* Always get VRSAVE back */
	if (v_regs != NULL && tm_v_regs != NULL) {
		err |= __get_user(current->thread.vrsave,
				  (u32 __user *)&v_regs[33]);
		err |= __get_user(current->thread.transact_vrsave,
				  (u32 __user *)&tm_v_regs[33]);
	} else {
		current->thread.vrsave = 0;
		current->thread.transact_vrsave = 0;
	}
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	/* restore floating point */
	err |= copy_fpr_from_user(current, &sc->fp_regs);
	err |= copy_transact_fpr_from_user(current, &tm_sc->fp_regs);
#ifdef CONFIG_VSX
	/*
	 * Get additional VSX data. Update v_regs to point after the
	 * VMX data. Copy VSX low doubleword from userspace to local
	 * buffer for formatting, then into the taskstruct.
	 */
	if (v_regs && ((msr & MSR_VSX) != 0)) {
		v_regs += ELF_NVRREG;
		tm_v_regs += ELF_NVRREG;
		err |= copy_vsx_from_user(current, v_regs);
		err |= copy_transact_vsx_from_user(current, tm_v_regs);
	} else {
		for (i = 0; i < 32; i++) {
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0;
		}
	}
#endif
	tm_enable();
	/* Make sure the transaction is marked as failed */
	current->thread.tm_texasr |= TEXASR_FS;
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread, msr);

	/* This loads the speculative FP/VEC state, if used */
	if (msr & MSR_FP) {
		do_load_up_transact_fpu(&current->thread);
		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&current->thread);
		regs->msr |= MSR_VEC;
	}
#endif

	return err;
}
#endif

/*
 * Set up the trampoline code on the stack
 */
static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
{
	int i;
	long err = 0;

	/* addi r1, r1, __SIGNAL_FRAMESIZE  # Pop the dummy stackframe */
	err |= __put_user(0x38210000UL | (__SIGNAL_FRAMESIZE & 0xffff), &tramp[0]);
	/* li r0, __NR_[rt_]sigreturn */
	err |= __put_user(0x38000000UL | (syscall & 0xffff), &tramp[1]);
	/* sc */
	err |= __put_user(0x44000002UL, &tramp[2]);

	/* Minimal traceback info */
	for (i = TRAMP_TRACEBACK; i < TRAMP_SIZE; i++)
		err |= __put_user(0, &tramp[i]);

	if (!err)
		flush_icache_range((unsigned long) &tramp[0],
				   (unsigned long) &tramp[TRAMP_SIZE]);

	return err;
}
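
/*
 * The flush_icache_range() above is what makes the trampoline safe to
 * execute: the stores go through the data cache, and PowerPC does not
 * keep the instruction cache coherent with it automatically. In practice
 * the trampoline is only used when no vDSO is mapped; see the
 * vdso64_rt_sigtramp check in handle_rt_signal64() below.
 */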

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end. We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - 32*sizeof(long))
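
/*
 * (The 32*sizeof(long) carved off here is the VSX extension of the
 * context: one doubleword for the low half of each of vsr0..vsr31,
 * matching what copy_vsx_to_user() appends after the VMX data.)
 */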

/*
 * Handle {get,set,swap}_context operations
 */
int sys_swapcontext(struct ucontext __user *old_ctx,
		    struct ucontext __user *new_ctx,
		    long ctx_size, long r6, long r7, long r8, struct pt_regs *regs)
{
	unsigned char tmp;
	sigset_t set;
	unsigned long new_msr = 0;
	int ctx_has_vsx_region = 0;

	if (new_ctx &&
	    get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR]))
		return -EFAULT;
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * If the new context state sets the MSR VSX bits but
	 * it doesn't provide VSX state.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;

	if (old_ctx != NULL) {
		if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
		    || setup_sigcontext(&old_ctx->uc_mcontext, regs, 0, NULL, 0,
					ctx_has_vsx_region)
		    || __copy_to_user(&old_ctx->uc_sigmask,
				      &current->blocked, sizeof(sigset_t)))
			return -EFAULT;
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
	    || __get_user(tmp, (u8 __user *) new_ctx)
	    || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted. For instance
	 * the NIP value may have been updated but not some of the
	 * other registers. Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */

	if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set)))
		do_exit(SIGSEGV);
	set_current_blocked(&set);
	if (restore_sigcontext(regs, NULL, 0, &new_ctx->uc_mcontext))
		do_exit(SIGSEGV);

	/* This returns like rt_sigreturn */
	set_thread_flag(TIF_RESTOREALL);
	return 0;
}


/*
 * Do a signal return; undo the signal stack.
 */

int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
		     unsigned long r6, unsigned long r7, unsigned long r8,
		     struct pt_regs *regs)
{
	struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1];
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	unsigned long msr;
#endif

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	if (!access_ok(VERIFY_READ, uc, sizeof(*uc)))
		goto badframe;

	if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
		goto badframe;
	set_current_blocked(&set);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
		goto badframe;
	if (MSR_TM_ACTIVE(msr)) {
		/* We recheckpoint on return. */
		struct ucontext __user *uc_transact;
		if (__get_user(uc_transact, &uc->uc_link))
			goto badframe;
		if (restore_tm_sigcontexts(regs, &uc->uc_mcontext,
					   &uc_transact->uc_mcontext))
			goto badframe;
	} else
	/* Fall through, for non-TM restore */
#endif
	if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext))
		goto badframe;

	if (restore_altstack(&uc->uc_stack))
		goto badframe;

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, "rt_sigreturn",
				   (long)uc, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}
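
/*
 * Both paths above finish by setting TIF_RESTOREALL: on syscall exit the
 * kernel then restores the full register set from pt_regs rather than
 * only the volatile registers, so every value written by
 * restore_sigcontext() actually reaches user mode.
 */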

int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	unsigned long newsp = 0;
	long err = 0;

	frame = get_sigframe(ksig, get_tm_stackpointer(regs), sizeof(*frame), 0);
	if (unlikely(frame == NULL))
		goto badframe;

	err |= __put_user(&frame->info, &frame->pinfo);
	err |= __put_user(&frame->uc, &frame->puc);
	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
	if (err)
		goto badframe;

	/* Create the ucontext. */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr)) {
		/* The ucontext_t passed to userland points to the second
		 * ucontext_t (for transactional state) with its uc_link ptr.
		 */
		err |= __put_user(&frame->uc_transact, &frame->uc.uc_link);
		err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
					    &frame->uc_transact.uc_mcontext,
					    regs, ksig->sig,
					    NULL,
					    (unsigned long)ksig->ka.sa.sa_handler);
	} else
#endif
	{
		err |= __put_user(0, &frame->uc.uc_link);
		err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, ksig->sig,
					NULL, (unsigned long)ksig->ka.sa.sa_handler,
					1);
	}
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		goto badframe;

	/* Make sure signal handler doesn't get spurious FP exceptions */
	current->thread.fp_state.fpscr = 0;

	/* Set up to return from userspace. */
	if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
		regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
	} else {
		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
		if (err)
			goto badframe;
		regs->link = (unsigned long) &frame->tramp[0];
	}

	/* Allocate a dummy caller frame for the signal handler. */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);

	/* Set up "regs" so we "return" to the signal handler. */
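	/*
	 * For ELFv2 (is_elf2_task()) the handler address is the entry point
	 * itself, and the ABI expects r12 to hold that entry address so the
	 * callee can establish its TOC. Under ELFv1 the handler is a
	 * function descriptor, dereferenced below for entry point and TOC.
	 */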
	if (is_elf2_task()) {
		regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
		regs->gpr[12] = regs->nip;
	} else {
		/* Handler is *really* a pointer to the function descriptor for
		 * the signal routine. The first entry in the function
		 * descriptor is the entry address of the signal routine and
		 * the second entry is the TOC value we need to use.
		 */
		func_descr_t __user *funct_desc_ptr =
			(func_descr_t __user *) ksig->ka.sa.sa_handler;

		err |= get_user(regs->nip, &funct_desc_ptr->entry);
		err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
	}

	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->result = 0;
	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		err |= get_user(regs->gpr[4], (unsigned long __user *)&frame->pinfo);
		err |= get_user(regs->gpr[5], (unsigned long __user *)&frame->puc);
		regs->gpr[6] = (unsigned long) frame;
	} else {
		regs->gpr[4] = (unsigned long)&frame->uc.uc_mcontext;
	}
	if (err)
		goto badframe;

	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, "setup_rt_frame",
				   (long)frame, regs->nip, regs->link);

	return 1;
}