This source file includes following definitions.
- put_sigset_t
- get_sigset_t
- save_general_regs
- restore_general_regs
- get_sigset_t
- save_general_regs
- restore_general_regs
- copy_fpr_to_user
- copy_fpr_from_user
- copy_vsx_to_user
- copy_vsx_from_user
- copy_ckfpr_to_user
- copy_ckfpr_from_user
- copy_ckvsx_to_user
- copy_ckvsx_from_user
- copy_fpr_to_user
- copy_fpr_from_user
- copy_ckfpr_to_user
- copy_ckfpr_from_user
- save_user_regs
- save_tm_user_regs
- restore_user_regs
- restore_tm_user_regs
- handle_rt_signal32
- do_setcontext
- do_setcontext_tm
- COMPAT_SYSCALL_DEFINE3
- COMPAT_SYSCALL_DEFINE0
- SYSCALL_DEFINE3
- handle_signal32
- COMPAT_SYSCALL_DEFINE0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16 #include <linux/sched.h>
17 #include <linux/mm.h>
18 #include <linux/smp.h>
19 #include <linux/kernel.h>
20 #include <linux/signal.h>
21 #include <linux/errno.h>
22 #include <linux/elf.h>
23 #include <linux/ptrace.h>
24 #include <linux/pagemap.h>
25 #include <linux/ratelimit.h>
26 #include <linux/syscalls.h>
27 #ifdef CONFIG_PPC64
28 #include <linux/compat.h>
29 #else
30 #include <linux/wait.h>
31 #include <linux/unistd.h>
32 #include <linux/stddef.h>
33 #include <linux/tty.h>
34 #include <linux/binfmts.h>
35 #endif
36
37 #include <linux/uaccess.h>
38 #include <asm/cacheflush.h>
39 #include <asm/syscalls.h>
40 #include <asm/sigcontext.h>
41 #include <asm/vdso.h>
42 #include <asm/switch_to.h>
43 #include <asm/tm.h>
44 #include <asm/asm-prototypes.h>
45 #ifdef CONFIG_PPC64
46 #include "ppc32.h"
47 #include <asm/unistd.h>
48 #else
49 #include <asm/ucontext.h>
50 #include <asm/pgtable.h>
51 #endif
52
53 #include "signal.h"
54
55
56 #ifdef CONFIG_PPC64
57 #define old_sigaction old_sigaction32
58 #define sigcontext sigcontext32
59 #define mcontext mcontext32
60 #define ucontext ucontext32
61
62 #define __save_altstack __compat_save_altstack
63
64
65
66
67
68 #define UCONTEXTSIZEWITHOUTVSX \
69 (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
70
71
72
73
74
75
76
77
78 #define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
79 #undef __SIGNAL_FRAMESIZE
80 #define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32
81 #undef ELF_NVRREG
82 #define ELF_NVRREG ELF_NVRREG32
83
84
85
86
87
88 static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
89 {
90 return put_compat_sigset(uset, set, sizeof(*uset));
91 }
92
93 static inline int get_sigset_t(sigset_t *set,
94 const compat_sigset_t __user *uset)
95 {
96 return get_compat_sigset(set, uset);
97 }
98
99 #define to_user_ptr(p) ptr_to_compat(p)
100 #define from_user_ptr(p) compat_ptr(p)
101
102 static inline int save_general_regs(struct pt_regs *regs,
103 struct mcontext __user *frame)
104 {
105 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
106 int i;
107
108 elf_greg_t64 softe = 0x1;
109
110 WARN_ON(!FULL_REGS(regs));
111
112 for (i = 0; i <= PT_RESULT; i ++) {
113 if (i == 14 && !FULL_REGS(regs))
114 i = 32;
115 if ( i == PT_SOFTE) {
116 if(__put_user((unsigned int)softe, &frame->mc_gregs[i]))
117 return -EFAULT;
118 else
119 continue;
120 }
121 if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
122 return -EFAULT;
123 }
124 return 0;
125 }
126
127 static inline int restore_general_regs(struct pt_regs *regs,
128 struct mcontext __user *sr)
129 {
130 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
131 int i;
132
133 for (i = 0; i <= PT_RESULT; i++) {
134 if ((i == PT_MSR) || (i == PT_SOFTE))
135 continue;
136 if (__get_user(gregs[i], &sr->mc_gregs[i]))
137 return -EFAULT;
138 }
139 return 0;
140 }
141
142 #else
143
144 #define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
145
146 static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
147 {
148 return copy_to_user(uset, set, sizeof(*uset));
149 }
150
151 static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
152 {
153 return copy_from_user(set, uset, sizeof(*uset));
154 }
155
156 #define to_user_ptr(p) ((unsigned long)(p))
157 #define from_user_ptr(p) ((void __user *)(p))
158
159 static inline int save_general_regs(struct pt_regs *regs,
160 struct mcontext __user *frame)
161 {
162 WARN_ON(!FULL_REGS(regs));
163 return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
164 }
165
166 static inline int restore_general_regs(struct pt_regs *regs,
167 struct mcontext __user *sr)
168 {
169
170 if (__copy_from_user(regs, &sr->mc_gregs,
171 PT_MSR * sizeof(elf_greg_t)))
172 return -EFAULT;
173
174 if (__copy_from_user(®s->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
175 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
176 return -EFAULT;
177 return 0;
178 }
179 #endif
180
181
182
183
184
185
186
187
188
189
190
191
192
/*
 * Layout of the frame placed on the user stack for a non-RT signal:
 * the sigcontext, the register image, optionally a second pair for the
 * transactional state, and a trailing gap left for the handler's callees.
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;	/* checkpointed/transactional pair */
	struct mcontext mctx_transact;
#endif
	/*
	 * ABI gap below the frame (56 words); presumably room for the
	 * handler's callees to store registers below r1 — see the 32-bit
	 * PowerPC ABI for the exact rationale.
	 */
	int abigap[56];
};
206
207
208 #define tramp mc_pad
209
210
211
212
213
214
215
216
217
218
219
220
/*
 * Layout of the frame placed on the user stack for an RT signal:
 * siginfo (compat layout on PPC64), the ucontext, optionally a second
 * ucontext for the transactional state, and a trailing ABI gap.
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;		/* 32-bit siginfo for compat tasks */
#else
	struct siginfo info;
#endif
	struct ucontext uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext uc_transact;	/* transactional register state */
#endif
	/*
	 * ABI gap below the frame (56 words); presumably room for the
	 * handler's callees to store registers below r1 — see the 32-bit
	 * PowerPC ABI for the exact rationale.
	 */
	int abigap[56];
};
237
238 #ifdef CONFIG_VSX
239 unsigned long copy_fpr_to_user(void __user *to,
240 struct task_struct *task)
241 {
242 u64 buf[ELF_NFPREG];
243 int i;
244
245
246 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
247 buf[i] = task->thread.TS_FPR(i);
248 buf[i] = task->thread.fp_state.fpscr;
249 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
250 }
251
252 unsigned long copy_fpr_from_user(struct task_struct *task,
253 void __user *from)
254 {
255 u64 buf[ELF_NFPREG];
256 int i;
257
258 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
259 return 1;
260 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
261 task->thread.TS_FPR(i) = buf[i];
262 task->thread.fp_state.fpscr = buf[i];
263
264 return 0;
265 }
266
267 unsigned long copy_vsx_to_user(void __user *to,
268 struct task_struct *task)
269 {
270 u64 buf[ELF_NVSRHALFREG];
271 int i;
272
273
274 for (i = 0; i < ELF_NVSRHALFREG; i++)
275 buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
276 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
277 }
278
279 unsigned long copy_vsx_from_user(struct task_struct *task,
280 void __user *from)
281 {
282 u64 buf[ELF_NVSRHALFREG];
283 int i;
284
285 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
286 return 1;
287 for (i = 0; i < ELF_NVSRHALFREG ; i++)
288 task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
289 return 0;
290 }
291
292 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
293 unsigned long copy_ckfpr_to_user(void __user *to,
294 struct task_struct *task)
295 {
296 u64 buf[ELF_NFPREG];
297 int i;
298
299
300 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
301 buf[i] = task->thread.TS_CKFPR(i);
302 buf[i] = task->thread.ckfp_state.fpscr;
303 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
304 }
305
306 unsigned long copy_ckfpr_from_user(struct task_struct *task,
307 void __user *from)
308 {
309 u64 buf[ELF_NFPREG];
310 int i;
311
312 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
313 return 1;
314 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
315 task->thread.TS_CKFPR(i) = buf[i];
316 task->thread.ckfp_state.fpscr = buf[i];
317
318 return 0;
319 }
320
321 unsigned long copy_ckvsx_to_user(void __user *to,
322 struct task_struct *task)
323 {
324 u64 buf[ELF_NVSRHALFREG];
325 int i;
326
327
328 for (i = 0; i < ELF_NVSRHALFREG; i++)
329 buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
330 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
331 }
332
333 unsigned long copy_ckvsx_from_user(struct task_struct *task,
334 void __user *from)
335 {
336 u64 buf[ELF_NVSRHALFREG];
337 int i;
338
339 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
340 return 1;
341 for (i = 0; i < ELF_NVSRHALFREG ; i++)
342 task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
343 return 0;
344 }
345 #endif
346 #else
347 inline unsigned long copy_fpr_to_user(void __user *to,
348 struct task_struct *task)
349 {
350 return __copy_to_user(to, task->thread.fp_state.fpr,
351 ELF_NFPREG * sizeof(double));
352 }
353
354 inline unsigned long copy_fpr_from_user(struct task_struct *task,
355 void __user *from)
356 {
357 return __copy_from_user(task->thread.fp_state.fpr, from,
358 ELF_NFPREG * sizeof(double));
359 }
360
361 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
362 inline unsigned long copy_ckfpr_to_user(void __user *to,
363 struct task_struct *task)
364 {
365 return __copy_to_user(to, task->thread.ckfp_state.fpr,
366 ELF_NFPREG * sizeof(double));
367 }
368
369 inline unsigned long copy_ckfpr_from_user(struct task_struct *task,
370 void __user *from)
371 {
372 return __copy_from_user(task->thread.ckfp_state.fpr, from,
373 ELF_NFPREG * sizeof(double));
374 }
375 #endif
376 #endif
377
378
379
380
381
382
383 static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
384 struct mcontext __user *tm_frame, int sigret,
385 int ctx_has_vsx_region)
386 {
387 unsigned long msr = regs->msr;
388
389
390 flush_fp_to_thread(current);
391
392
393 if (save_general_regs(regs, frame))
394 return 1;
395
396 #ifdef CONFIG_ALTIVEC
397
398 if (current->thread.used_vr) {
399 flush_altivec_to_thread(current);
400 if (__copy_to_user(&frame->mc_vregs, ¤t->thread.vr_state,
401 ELF_NVRREG * sizeof(vector128)))
402 return 1;
403
404
405 msr |= MSR_VEC;
406 }
407
408
409
410
411
412
413
414
415 if (cpu_has_feature(CPU_FTR_ALTIVEC))
416 current->thread.vrsave = mfspr(SPRN_VRSAVE);
417 if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
418 return 1;
419 #endif
420 if (copy_fpr_to_user(&frame->mc_fregs, current))
421 return 1;
422
423
424
425
426
427 msr &= ~MSR_VSX;
428 #ifdef CONFIG_VSX
429
430
431
432
433
434
435 if (current->thread.used_vsr && ctx_has_vsx_region) {
436 flush_vsx_to_thread(current);
437 if (copy_vsx_to_user(&frame->mc_vsregs, current))
438 return 1;
439 msr |= MSR_VSX;
440 }
441 #endif
442 #ifdef CONFIG_SPE
443
444 if (current->thread.used_spe) {
445 flush_spe_to_thread(current);
446 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
447 ELF_NEVRREG * sizeof(u32)))
448 return 1;
449
450
451 msr |= MSR_SPE;
452 }
453
454
455
456 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
457 return 1;
458 #endif
459
460 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
461 return 1;
462
463
464
465 if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
466 return 1;
467
468 if (sigret) {
469
470 if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
471 || __put_user(PPC_INST_SC, &frame->tramp[1]))
472 return 1;
473 flush_icache_range((unsigned long) &frame->tramp[0],
474 (unsigned long) &frame->tramp[2]);
475 }
476
477 return 0;
478 }
479
480 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
481
482
483
484
485
486
487
488
489
490 static int save_tm_user_regs(struct pt_regs *regs,
491 struct mcontext __user *frame,
492 struct mcontext __user *tm_frame, int sigret,
493 unsigned long msr)
494 {
495 WARN_ON(tm_suspend_disabled);
496
497
498 if (save_general_regs(¤t->thread.ckpt_regs, frame)
499 || save_general_regs(regs, tm_frame))
500 return 1;
501
502
503
504
505
506
507
508 if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
509 return 1;
510
511 #ifdef CONFIG_ALTIVEC
512
513 if (current->thread.used_vr) {
514 if (__copy_to_user(&frame->mc_vregs, ¤t->thread.ckvr_state,
515 ELF_NVRREG * sizeof(vector128)))
516 return 1;
517 if (msr & MSR_VEC) {
518 if (__copy_to_user(&tm_frame->mc_vregs,
519 ¤t->thread.vr_state,
520 ELF_NVRREG * sizeof(vector128)))
521 return 1;
522 } else {
523 if (__copy_to_user(&tm_frame->mc_vregs,
524 ¤t->thread.ckvr_state,
525 ELF_NVRREG * sizeof(vector128)))
526 return 1;
527 }
528
529
530
531
532 msr |= MSR_VEC;
533 }
534
535
536
537
538
539
540 if (cpu_has_feature(CPU_FTR_ALTIVEC))
541 current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
542 if (__put_user(current->thread.ckvrsave,
543 (u32 __user *)&frame->mc_vregs[32]))
544 return 1;
545 if (msr & MSR_VEC) {
546 if (__put_user(current->thread.vrsave,
547 (u32 __user *)&tm_frame->mc_vregs[32]))
548 return 1;
549 } else {
550 if (__put_user(current->thread.ckvrsave,
551 (u32 __user *)&tm_frame->mc_vregs[32]))
552 return 1;
553 }
554 #endif
555
556 if (copy_ckfpr_to_user(&frame->mc_fregs, current))
557 return 1;
558 if (msr & MSR_FP) {
559 if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
560 return 1;
561 } else {
562 if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
563 return 1;
564 }
565
566 #ifdef CONFIG_VSX
567
568
569
570
571
572
573 if (current->thread.used_vsr) {
574 if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
575 return 1;
576 if (msr & MSR_VSX) {
577 if (copy_vsx_to_user(&tm_frame->mc_vsregs,
578 current))
579 return 1;
580 } else {
581 if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
582 return 1;
583 }
584
585 msr |= MSR_VSX;
586 }
587 #endif
588 #ifdef CONFIG_SPE
589
590
591
592 if (current->thread.used_spe) {
593 flush_spe_to_thread(current);
594 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
595 ELF_NEVRREG * sizeof(u32)))
596 return 1;
597
598
599 msr |= MSR_SPE;
600 }
601
602
603 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
604 return 1;
605 #endif
606
607 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
608 return 1;
609 if (sigret) {
610
611 if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
612 || __put_user(PPC_INST_SC, &frame->tramp[1]))
613 return 1;
614 flush_icache_range((unsigned long) &frame->tramp[0],
615 (unsigned long) &frame->tramp[2]);
616 }
617
618 return 0;
619 }
620 #endif
621
622
623
624
625
626 static long restore_user_regs(struct pt_regs *regs,
627 struct mcontext __user *sr, int sig)
628 {
629 long err;
630 unsigned int save_r2 = 0;
631 unsigned long msr;
632 #ifdef CONFIG_VSX
633 int i;
634 #endif
635
636
637
638
639
640 if (!sig)
641 save_r2 = (unsigned int)regs->gpr[2];
642 err = restore_general_regs(regs, sr);
643 regs->trap = 0;
644 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
645 if (!sig)
646 regs->gpr[2] = (unsigned long) save_r2;
647 if (err)
648 return 1;
649
650
651 if (sig)
652 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
653
654 #ifdef CONFIG_ALTIVEC
655
656
657
658
659 regs->msr &= ~MSR_VEC;
660 if (msr & MSR_VEC) {
661
662 if (__copy_from_user(¤t->thread.vr_state, &sr->mc_vregs,
663 sizeof(sr->mc_vregs)))
664 return 1;
665 current->thread.used_vr = true;
666 } else if (current->thread.used_vr)
667 memset(¤t->thread.vr_state, 0,
668 ELF_NVRREG * sizeof(vector128));
669
670
671 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
672 return 1;
673 if (cpu_has_feature(CPU_FTR_ALTIVEC))
674 mtspr(SPRN_VRSAVE, current->thread.vrsave);
675 #endif
676 if (copy_fpr_from_user(current, &sr->mc_fregs))
677 return 1;
678
679 #ifdef CONFIG_VSX
680
681
682
683
684 regs->msr &= ~MSR_VSX;
685 if (msr & MSR_VSX) {
686
687
688
689
690 if (copy_vsx_from_user(current, &sr->mc_vsregs))
691 return 1;
692 current->thread.used_vsr = true;
693 } else if (current->thread.used_vsr)
694 for (i = 0; i < 32 ; i++)
695 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
696 #endif
697
698
699
700
701 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
702
703 #ifdef CONFIG_SPE
704
705
706 regs->msr &= ~MSR_SPE;
707 if (msr & MSR_SPE) {
708
709 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
710 ELF_NEVRREG * sizeof(u32)))
711 return 1;
712 current->thread.used_spe = true;
713 } else if (current->thread.used_spe)
714 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
715
716
717 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
718 return 1;
719 #endif
720
721 return 0;
722 }
723
724 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
725
726
727
728
729
730 static long restore_tm_user_regs(struct pt_regs *regs,
731 struct mcontext __user *sr,
732 struct mcontext __user *tm_sr)
733 {
734 long err;
735 unsigned long msr, msr_hi;
736 #ifdef CONFIG_VSX
737 int i;
738 #endif
739
740 if (tm_suspend_disabled)
741 return 1;
742
743
744
745
746
747
748
749 err = restore_general_regs(regs, tm_sr);
750 err |= restore_general_regs(¤t->thread.ckpt_regs, sr);
751
752 err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);
753
754 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
755 if (err)
756 return 1;
757
758
759 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
760
761 #ifdef CONFIG_ALTIVEC
762 regs->msr &= ~MSR_VEC;
763 if (msr & MSR_VEC) {
764
765 if (__copy_from_user(¤t->thread.ckvr_state, &sr->mc_vregs,
766 sizeof(sr->mc_vregs)) ||
767 __copy_from_user(¤t->thread.vr_state,
768 &tm_sr->mc_vregs,
769 sizeof(sr->mc_vregs)))
770 return 1;
771 current->thread.used_vr = true;
772 } else if (current->thread.used_vr) {
773 memset(¤t->thread.vr_state, 0,
774 ELF_NVRREG * sizeof(vector128));
775 memset(¤t->thread.ckvr_state, 0,
776 ELF_NVRREG * sizeof(vector128));
777 }
778
779
780 if (__get_user(current->thread.ckvrsave,
781 (u32 __user *)&sr->mc_vregs[32]) ||
782 __get_user(current->thread.vrsave,
783 (u32 __user *)&tm_sr->mc_vregs[32]))
784 return 1;
785 if (cpu_has_feature(CPU_FTR_ALTIVEC))
786 mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
787 #endif
788
789 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
790
791 if (copy_fpr_from_user(current, &sr->mc_fregs) ||
792 copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
793 return 1;
794
795 #ifdef CONFIG_VSX
796 regs->msr &= ~MSR_VSX;
797 if (msr & MSR_VSX) {
798
799
800
801
802 if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
803 copy_ckvsx_from_user(current, &sr->mc_vsregs))
804 return 1;
805 current->thread.used_vsr = true;
806 } else if (current->thread.used_vsr)
807 for (i = 0; i < 32 ; i++) {
808 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
809 current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
810 }
811 #endif
812
813 #ifdef CONFIG_SPE
814
815
816
817 regs->msr &= ~MSR_SPE;
818 if (msr & MSR_SPE) {
819 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
820 ELF_NEVRREG * sizeof(u32)))
821 return 1;
822 current->thread.used_spe = true;
823 } else if (current->thread.used_spe)
824 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
825
826
827 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
828 + ELF_NEVRREG))
829 return 1;
830 #endif
831
832
833 if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
834 return 1;
835 msr_hi <<= 32;
836
837 if (MSR_TM_RESV(msr_hi))
838 return 1;
839
840
841
842
843
844 preempt_disable();
845
846
847
848
849
850
851
852
853
854
855
856 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
857
858
859
860
861 tm_enable();
862
863 current->thread.tm_texasr |= TEXASR_FS;
864
865 tm_recheckpoint(¤t->thread);
866
867
868 msr_check_and_set(msr & (MSR_FP | MSR_VEC));
869 if (msr & MSR_FP) {
870 load_fp_state(¤t->thread.fp_state);
871 regs->msr |= (MSR_FP | current->thread.fpexc_mode);
872 }
873 #ifdef CONFIG_ALTIVEC
874 if (msr & MSR_VEC) {
875 load_vr_state(¤t->thread.vr_state);
876 regs->msr |= MSR_VEC;
877 }
878 #endif
879
880 preempt_enable();
881
882 return 0;
883 }
884 #endif
885
886 #ifdef CONFIG_PPC64
887
888 #define copy_siginfo_to_user copy_siginfo_to_user32
889
890 #endif
891
892
893
894
895
/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).  Returns 0 on success, 1 on failure.
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	struct mcontext __user *tm_frame = NULL;
	void __user *addr;		/* last user address touched, for the error report */
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;
#endif

	BUG_ON(tsk != current);

	/* Put a rt_sigframe on the (possibly transactional) stack */
	rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
	addr = rt_sf;
	if (unlikely(rt_sf == NULL))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	if (copy_siginfo_to_user(&rt_sf->info, &ksig->info)
	    || __put_user(0, &rt_sf->uc.uc_flags)
	    || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
		    &rt_sf->uc.uc_regs)
	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
		goto badframe;

	/* Save user registers on the stack */
	frame = &rt_sf->uc.uc_mcontext;
	addr = frame;
	/* Use the VDSO trampoline when available, otherwise write one
	 * into the frame itself. */
	if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
		sigret = 0;
		tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
	} else {
		sigret = __NR_rt_sigreturn;
		tramp = (unsigned long) frame->tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_frame = &rt_sf->uc_transact.uc_mcontext;
	if (MSR_TM_ACTIVE(msr)) {
		/* Link the transactional ucontext and save both register sets */
		if (__put_user((unsigned long)&rt_sf->uc_transact,
			       &rt_sf->uc.uc_link) ||
		    __put_user((unsigned long)tm_frame,
			       &rt_sf->uc_transact.uc_regs))
			goto badframe;
		if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr))
			goto badframe;
	}
	else
#endif
	{
		if (__put_user(0, &rt_sf->uc.uc_link))
			goto badframe;
		if (save_user_regs(regs, frame, tm_frame, sigret, 1))
			goto badframe;
	}
	regs->link = tramp;

	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
	addr = (void __user *)regs->gpr[1];
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for the signal handler: args in r3-r5 */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) &rt_sf->info;
	regs->gpr[5] = (unsigned long) &rt_sf->uc;
	regs->gpr[6] = (unsigned long) rt_sf;
	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_rt_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   tsk->comm, tsk->pid,
				   addr, regs->nip, regs->link);

	return 1;
}
992
/*
 * Install the blocked-signal set and register state from the user
 * ucontext 'ucp'.  'sig' is passed through to restore_user_regs()
 * (non-zero for a signal return).  Returns 0 or -EFAULT.
 */
static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		/* uc_regs holds a 32-bit user pointer to the mcontext */
		if (__get_user(cmcp, &ucp->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* no explicit access_ok here; presumably the compat (< 4GB)
		 * pointer makes it unnecessary — TODO confirm */
	}
#else
	if (__get_user(mcp, &ucp->uc_regs))
		return -EFAULT;
	if (!access_ok(mcp, sizeof(*mcp)))
		return -EFAULT;
#endif
	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;
}
1021
1022 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1023 static int do_setcontext_tm(struct ucontext __user *ucp,
1024 struct ucontext __user *tm_ucp,
1025 struct pt_regs *regs)
1026 {
1027 sigset_t set;
1028 struct mcontext __user *mcp;
1029 struct mcontext __user *tm_mcp;
1030 u32 cmcp;
1031 u32 tm_cmcp;
1032
1033 if (get_sigset_t(&set, &ucp->uc_sigmask))
1034 return -EFAULT;
1035
1036 if (__get_user(cmcp, &ucp->uc_regs) ||
1037 __get_user(tm_cmcp, &tm_ucp->uc_regs))
1038 return -EFAULT;
1039 mcp = (struct mcontext __user *)(u64)cmcp;
1040 tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
1041
1042
1043 set_current_blocked(&set);
1044 if (restore_tm_user_regs(regs, mcp, tm_mcp))
1045 return -EFAULT;
1046
1047 return 0;
1048 }
1049 #endif
1050
1051 #ifdef CONFIG_PPC64
1052 COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
1053 struct ucontext __user *, new_ctx, int, ctx_size)
1054 #else
1055 SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
1056 struct ucontext __user *, new_ctx, long, ctx_size)
1057 #endif
1058 {
1059 struct pt_regs *regs = current_pt_regs();
1060 int ctx_has_vsx_region = 0;
1061
1062 #ifdef CONFIG_PPC64
1063 unsigned long new_msr = 0;
1064
1065 if (new_ctx) {
1066 struct mcontext __user *mcp;
1067 u32 cmcp;
1068
1069
1070
1071
1072
1073
1074 if (__get_user(cmcp, &new_ctx->uc_regs))
1075 return -EFAULT;
1076 mcp = (struct mcontext __user *)(u64)cmcp;
1077 if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
1078 return -EFAULT;
1079 }
1080
1081
1082
1083
1084 if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
1085 return -EINVAL;
1086
1087
1088
1089
1090 if ((ctx_size < sizeof(struct ucontext)) &&
1091 (new_msr & MSR_VSX))
1092 return -EINVAL;
1093
1094 if (ctx_size >= sizeof(struct ucontext))
1095 ctx_has_vsx_region = 1;
1096 #else
1097
1098
1099
1100 if (ctx_size < sizeof(struct ucontext))
1101 return -EINVAL;
1102 #endif
1103 if (old_ctx != NULL) {
1104 struct mcontext __user *mctx;
1105
1106
1107
1108
1109
1110
1111
1112
1113 mctx = (struct mcontext __user *)
1114 ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
1115 if (!access_ok(old_ctx, ctx_size)
1116 || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
1117 || put_sigset_t(&old_ctx->uc_sigmask, ¤t->blocked)
1118 || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
1119 return -EFAULT;
1120 }
1121 if (new_ctx == NULL)
1122 return 0;
1123 if (!access_ok(new_ctx, ctx_size) ||
1124 fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
1125 return -EFAULT;
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138 if (do_setcontext(new_ctx, regs, 0))
1139 do_exit(SIGSEGV);
1140
1141 set_thread_flag(TIF_RESTOREALL);
1142 return 0;
1143 }
1144
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
#else
SYSCALL_DEFINE0(rt_sigreturn)
#endif
{
	struct rt_sigframe __user *rt_sf;
	struct pt_regs *regs = current_pt_regs();
	int tm_restore = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/* The frame lives where handle_rt_signal32() put it, just above
	 * the back-chain gap. */
	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(rt_sf, sizeof(*rt_sf)))
		goto bad;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * If a transaction is suspended, reclaim it: sigreturn destroys
	 * all traces of the signal frame, including any transactional
	 * state created within it.  The failure cause (0) is not user
	 * visible since there will be no recheckpoint of that state.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* The top 32 MSR bits were stashed in the transactional
		 * mcontext's 32-bit MSR word at delivery time. */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi<<32)) {
			/* Trying to start TM on a non-TM system */
			if (!cpu_has_feature(CPU_FTR_TM))
				goto bad;
			/* Only recheckpoint on return if the context says a
			 * transaction was active. */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore) {
		/*
		 * Clear the TS bits: the ucontext MSR TS is not set and no
		 * recheckpoint was done, so leaving them set would trap
		 * ("TM Bad Thing") on the return to userspace.
		 */
		regs->msr &= ~MSR_TS_MASK;
	}
	/* Fall through for the non-TM restore */
#endif
	if (!tm_restore)
		if (do_setcontext(&rt_sf->uc, regs, 1))
			goto bad;

	/*
	 * Restore the sigaltstack state that signal delivery saved with
	 * __save_altstack(); compat variant on PPC64.
	 */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

bad:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_rt_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   rt_sf, regs->nip, regs->link);

	force_sig(SIGSEGV);
	return 0;
}
1250
#ifdef CONFIG_PPC32
/*
 * Apply a list of debug operations (single-stepping / branch tracing)
 * and then install the given ucontext, as do_setcontext() would.
 */
SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
			 int, ndbg, struct sig_dbg_op __user *, dbg)
{
	struct pt_regs *regs = current_pt_regs();
	struct sig_dbg_op op;
	int i;
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	/* First pass: validate and accumulate the requested MSR/DBCR0 bits */
	for (i=0; i<ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				/* Only drop MSR_DE/IDM if no other debug
				 * events remain active */
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			/* not supported with advanced debug registers */
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/*
	 * Install the values only after the whole list validated, so a
	 * failure in the loop above leaves the registers untouched.
	 */
	regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	if (!access_ok(ctx, sizeof(*ctx)) ||
	    fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
		return -EFAULT;

	/*
	 * If copying the context into the kernel's image of the user's
	 * registers faults, we can't just return -EFAULT: the registers
	 * may already be partially updated.  Having verified and touched
	 * the region above, such a fault is essentially OOM or a
	 * concurrent unmap, so deliver SIGSEGV instead.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		if (show_unhandled_signals)
			printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
					   "sys_debug_setcontext: %p nip %08lx "
					   "lr %08lx\n",
					   current->comm, current->pid,
					   ctx, regs->nip, regs->link);

		force_sig(SIGSEGV);
		goto out;
	}

	/* Restore the sigaltstack state saved at signal delivery; the
	 * return value is deliberately ignored here. */
	restore_altstack(&ctx->uc_stack);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif
1354
1355
1356
1357
/*
 * Set up a signal frame for a non-RT ("old-style") signal handler.
 * Returns 0 on success, 1 on failure.
 */
int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
		    struct task_struct *tsk)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;
#endif

	BUG_ON(tsk != current);

	/* Put a sigframe on the (possibly transactional) stack */
	frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
	if (unlikely(frame == NULL))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	/* The upper word of the mask goes into the otherwise-unused
	 * _unused[3] slot of the sigcontext */
	if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler)
	    || __put_user(oldset->sig[0], &sc->oldmask)
#ifdef CONFIG_PPC64
	    || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
#else
	    || __put_user(oldset->sig[1], &sc->_unused[3])
#endif
	    || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
	    || __put_user(ksig->sig, &sc->signal))
		goto badframe;

	/* Use the VDSO trampoline when available, otherwise write one
	 * into the frame itself. */
	if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
		sigret = 0;
		tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
	} else {
		sigret = __NR_sigreturn;
		tramp = (unsigned long) frame->mctx.tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->mctx_transact;
	if (MSR_TM_ACTIVE(msr)) {
		if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
				      sigret, msr))
			goto badframe;
	}
	else
#endif
	{
		if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
			goto badframe;
	}

	regs->link = tramp;

	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for the signal handler: signo in r3, sigcontext in r4 */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) sc;
	regs->nip = (unsigned long) (unsigned long)ksig->ka.sa.sa_handler;
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   tsk->comm, tsk->pid,
				   frame, regs->nip, regs->link);

	return 1;
}
1444
1445
1446
1447
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(sigreturn)
#else
SYSCALL_DEFINE0(sigreturn)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	void __user *addr;		/* last user address touched, for the error report */
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct mcontext __user *mcp, *tm_mcp;
	unsigned long msr_hi;
#endif

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/* The frame lives where handle_signal32() put it */
	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	addr = sc;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	mcp = (struct mcontext __user *)&sf->mctx;
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	/* The transactional frame's MSR word holds the top 32 MSR bits
	 * stashed at delivery time */
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
	if (MSR_TM_ACTIVE(msr_hi<<32)) {
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else
#endif
	{
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		addr = sr;
		if (!access_ok(sr, sizeof(*sr))
		    || restore_user_regs(regs, sr, 1))
			goto badframe;
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   addr, regs->nip, regs->link);

	force_sig(SIGSEGV);
	return 0;
}