This source file includes the following definitions:
- check_if_tm_restore_required
- check_if_tm_restore_required
- enable_strict_msr_control
- msr_check_and_set
- __msr_check_and_clear
- __giveup_fpu
- giveup_fpu
- flush_fp_to_thread
- enable_kernel_fp
- restore_fp
- restore_fp
- __giveup_altivec
- giveup_altivec
- enable_kernel_altivec
- flush_altivec_to_thread
- restore_altivec
- restore_altivec
- __giveup_vsx
- giveup_vsx
- enable_kernel_vsx
- flush_vsx_to_thread
- restore_vsx
- restore_vsx
- giveup_spe
- enable_kernel_spe
- flush_spe_to_thread
- init_msr_all_available
- giveup_all
- restore_math
- save_all
- flush_all_to_thread
- do_send_trap
- do_break
- set_debug_reg_defaults
- prime_debug_regs
- switch_booke_debug_regs
- set_breakpoint
- set_debug_reg_defaults
- __set_dabr
- __set_dabr
- __set_dabr
- __set_dabr
- set_dabr
- __set_breakpoint
- ppc_breakpoint_available
- hw_brk_match
- tm_enabled
- tm_reclaim_thread
- tm_reclaim_current
- tm_reclaim_task
- tm_recheckpoint
- tm_recheckpoint_new_task
- __switch_to_tm
- restore_tm_state
- save_sprs
- restore_sprs
- __switch_to
- show_instructions
- show_user_instructions
- print_bits
- print_tm_bits
- print_tm_bits
- print_msr_bits
- show_regs
- flush_thread
- arch_setup_new_exec
- set_thread_uses_vas
- set_thread_tidr
- release_thread
- arch_dup_task_struct
- setup_ksp_vsid
- copy_thread_tls
- start_thread
- set_fpexc_mode
- get_fpexc_mode
- set_endian
- get_endian
- set_unalign_ctl
- get_unalign_ctl
- valid_irq_stack
- validate_sp
- __get_wchan
- get_wchan
- show_stack
- __ppc64_runlatch_on
- __ppc64_runlatch_off
- arch_align_stack
- brk_rnd
- arch_randomize_brk
1
2
3
4
5
6
7
8
9
10
11
12
13 #include <linux/errno.h>
14 #include <linux/sched.h>
15 #include <linux/sched/debug.h>
16 #include <linux/sched/task.h>
17 #include <linux/sched/task_stack.h>
18 #include <linux/kernel.h>
19 #include <linux/mm.h>
20 #include <linux/smp.h>
21 #include <linux/stddef.h>
22 #include <linux/unistd.h>
23 #include <linux/ptrace.h>
24 #include <linux/slab.h>
25 #include <linux/user.h>
26 #include <linux/elf.h>
27 #include <linux/prctl.h>
28 #include <linux/init_task.h>
29 #include <linux/export.h>
30 #include <linux/kallsyms.h>
31 #include <linux/mqueue.h>
32 #include <linux/hardirq.h>
33 #include <linux/utsname.h>
34 #include <linux/ftrace.h>
35 #include <linux/kernel_stat.h>
36 #include <linux/personality.h>
37 #include <linux/random.h>
38 #include <linux/hw_breakpoint.h>
39 #include <linux/uaccess.h>
40 #include <linux/elf-randomize.h>
41 #include <linux/pkeys.h>
42 #include <linux/seq_buf.h>
43
44 #include <asm/pgtable.h>
45 #include <asm/io.h>
46 #include <asm/processor.h>
47 #include <asm/mmu.h>
48 #include <asm/prom.h>
49 #include <asm/machdep.h>
50 #include <asm/time.h>
51 #include <asm/runlatch.h>
52 #include <asm/syscalls.h>
53 #include <asm/switch_to.h>
54 #include <asm/tm.h>
55 #include <asm/debug.h>
56 #ifdef CONFIG_PPC64
57 #include <asm/firmware.h>
58 #include <asm/hw_irq.h>
59 #endif
60 #include <asm/code-patching.h>
61 #include <asm/exec.h>
62 #include <asm/livepatch.h>
63 #include <asm/cpu_has_feature.h>
64 #include <asm/asm-prototypes.h>
65 #include <asm/stacktrace.h>
66 #include <asm/hw_breakpoint.h>
67
68 #include <linux/kprobes.h>
69 #include <linux/kdebug.h>
70
71
72 #ifdef TM_DEBUG_SW
73 #define TM_DEBUG(x...) printk(KERN_INFO x)
74 #else
75 #define TM_DEBUG(x...) do { } while(0)
76 #endif
77
78 extern unsigned long _get_SP(void);
79
80 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
81
82
83
84
85
86 bool tm_suspend_disabled __ro_after_init = false;
87
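/*
 * If the current task is inside an active transaction and TIF_RESTORE_TM is
 * not yet set, stash the live MSR in ckpt_regs and set the flag so the
 * transactional state is restored before returning to userspace.
 */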
88 static void check_if_tm_restore_required(struct task_struct *tsk)
89 {
90
91
92
93
94
95
96 if (tsk == current && tsk->thread.regs &&
97 MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
98 !test_thread_flag(TIF_RESTORE_TM)) {
99 tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
100 set_thread_flag(TIF_RESTORE_TM);
101 }
102 }
103
104 #else
105 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
106 #endif
107
108 bool strict_msr_control;
109 EXPORT_SYMBOL(strict_msr_control);
110
111 static int __init enable_strict_msr_control(char *str)
112 {
113 strict_msr_control = true;
114 pr_info("Enabling strict facility control\n");
115
116 return 0;
117 }
118 early_param("ppc_strict_facility_enable", enable_strict_msr_control);
119
120
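/*
 * Set the requested facility bits in the MSR (forcing MSR_VSX on alongside
 * MSR_FP on VSX-capable CPUs) and return the resulting MSR value.
 */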
121 unsigned long notrace msr_check_and_set(unsigned long bits)
122 {
123 unsigned long oldmsr = mfmsr();
124 unsigned long newmsr;
125
126 newmsr = oldmsr | bits;
127
128 #ifdef CONFIG_VSX
129 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
130 newmsr |= MSR_VSX;
131 #endif
132
133 if (oldmsr != newmsr)
134 mtmsr_isync(newmsr);
135
136 return newmsr;
137 }
138 EXPORT_SYMBOL_GPL(msr_check_and_set);
139
140
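/*
 * Clear the requested facility bits in the MSR, dropping MSR_VSX together
 * with MSR_FP on VSX-capable CPUs.
 */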
141 void notrace __msr_check_and_clear(unsigned long bits)
142 {
143 unsigned long oldmsr = mfmsr();
144 unsigned long newmsr;
145
146 newmsr = oldmsr & ~bits;
147
148 #ifdef CONFIG_VSX
149 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
150 newmsr &= ~MSR_VSX;
151 #endif
152
153 if (oldmsr != newmsr)
154 mtmsr_isync(newmsr);
155 }
156 EXPORT_SYMBOL(__msr_check_and_clear);
157
158 #ifdef CONFIG_PPC_FPU
159 static void __giveup_fpu(struct task_struct *tsk)
160 {
161 unsigned long msr;
162
163 save_fpu(tsk);
164 msr = tsk->thread.regs->msr;
165 msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
166 #ifdef CONFIG_VSX
167 if (cpu_has_feature(CPU_FTR_VSX))
168 msr &= ~MSR_VSX;
169 #endif
170 tsk->thread.regs->msr = msr;
171 }
172
173 void giveup_fpu(struct task_struct *tsk)
174 {
175 check_if_tm_restore_required(tsk);
176
177 msr_check_and_set(MSR_FP);
178 __giveup_fpu(tsk);
179 msr_check_and_clear(MSR_FP);
180 }
181 EXPORT_SYMBOL(giveup_fpu);
182
183
184
185
186
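/*
 * Make sure tsk's floating-point state is saved into its thread_struct.
 * If the registers are still live in the CPU (MSR_FP set), tsk must be
 * current; give the FP unit up so the state is written back to memory.
 */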
187 void flush_fp_to_thread(struct task_struct *tsk)
188 {
189 if (tsk->thread.regs) {
190
191
192
193
194
195
196
197
198 preempt_disable();
199 if (tsk->thread.regs->msr & MSR_FP) {
200
201
202
203
204
205
206
207 BUG_ON(tsk != current);
208 giveup_fpu(tsk);
209 }
210 preempt_enable();
211 }
212 }
213 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
214
215 void enable_kernel_fp(void)
216 {
217 unsigned long cpumsr;
218
219 WARN_ON(preemptible());
220
221 cpumsr = msr_check_and_set(MSR_FP);
222
223 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
224 check_if_tm_restore_required(current);
225
226
227
228
229
230
231
232 if (!MSR_TM_ACTIVE(cpumsr) &&
233 MSR_TM_ACTIVE(current->thread.regs->msr))
234 return;
235 __giveup_fpu(current);
236 }
237 }
238 EXPORT_SYMBOL(enable_kernel_fp);
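/*
 * Illustrative calling pattern (not part of this file): kernel code that
 * wants to use FP/VMX briefly does so with preemption disabled, e.g.
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	... use floating-point registers ...
 *	disable_kernel_fp();
 *	preempt_enable();
 *
 * disable_kernel_fp() is the companion helper declared in asm/switch_to.h.
 */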
239
240 static int restore_fp(struct task_struct *tsk)
241 {
242 if (tsk->thread.load_fp) {
243 load_fp_state(&current->thread.fp_state);
244 current->thread.load_fp++;
245 return 1;
246 }
247 return 0;
248 }
249 #else
250 static int restore_fp(struct task_struct *tsk) { return 0; }
251 #endif
252
253 #ifdef CONFIG_ALTIVEC
254 #define loadvec(thr) ((thr).load_vec)
255
256 static void __giveup_altivec(struct task_struct *tsk)
257 {
258 unsigned long msr;
259
260 save_altivec(tsk);
261 msr = tsk->thread.regs->msr;
262 msr &= ~MSR_VEC;
263 #ifdef CONFIG_VSX
264 if (cpu_has_feature(CPU_FTR_VSX))
265 msr &= ~MSR_VSX;
266 #endif
267 tsk->thread.regs->msr = msr;
268 }
269
270 void giveup_altivec(struct task_struct *tsk)
271 {
272 check_if_tm_restore_required(tsk);
273
274 msr_check_and_set(MSR_VEC);
275 __giveup_altivec(tsk);
276 msr_check_and_clear(MSR_VEC);
277 }
278 EXPORT_SYMBOL(giveup_altivec);
279
280 void enable_kernel_altivec(void)
281 {
282 unsigned long cpumsr;
283
284 WARN_ON(preemptible());
285
286 cpumsr = msr_check_and_set(MSR_VEC);
287
288 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
289 check_if_tm_restore_required(current);
290
291
292
293
294
295
296
297 if (!MSR_TM_ACTIVE(cpumsr) &&
298 MSR_TM_ACTIVE(current->thread.regs->msr))
299 return;
300 __giveup_altivec(current);
301 }
302 }
303 EXPORT_SYMBOL(enable_kernel_altivec);
304
305
306
307
308
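/* As flush_fp_to_thread(), but for the AltiVec/VMX register state. */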
309 void flush_altivec_to_thread(struct task_struct *tsk)
310 {
311 if (tsk->thread.regs) {
312 preempt_disable();
313 if (tsk->thread.regs->msr & MSR_VEC) {
314 BUG_ON(tsk != current);
315 giveup_altivec(tsk);
316 }
317 preempt_enable();
318 }
319 }
320 EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
321
322 static int restore_altivec(struct task_struct *tsk)
323 {
324 if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
325 load_vr_state(&tsk->thread.vr_state);
326 tsk->thread.used_vr = 1;
327 tsk->thread.load_vec++;
328
329 return 1;
330 }
331 return 0;
332 }
333 #else
334 #define loadvec(thr) 0
335 static inline int restore_altivec(struct task_struct *tsk) { return 0; }
336 #endif
337
338 #ifdef CONFIG_VSX
339 static void __giveup_vsx(struct task_struct *tsk)
340 {
341 unsigned long msr = tsk->thread.regs->msr;
342
343
344
345
346
347 WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));
348
349
350 if (msr & MSR_FP)
351 __giveup_fpu(tsk);
352 if (msr & MSR_VEC)
353 __giveup_altivec(tsk);
354 }
355
356 static void giveup_vsx(struct task_struct *tsk)
357 {
358 check_if_tm_restore_required(tsk);
359
360 msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
361 __giveup_vsx(tsk);
362 msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
363 }
364
365 void enable_kernel_vsx(void)
366 {
367 unsigned long cpumsr;
368
369 WARN_ON(preemptible());
370
371 cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
372
373 if (current->thread.regs &&
374 (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
375 check_if_tm_restore_required(current);
376
377
378
379
380
381
382
383 if (!MSR_TM_ACTIVE(cpumsr) &&
384 MSR_TM_ACTIVE(current->thread.regs->msr))
385 return;
386 __giveup_vsx(current);
387 }
388 }
389 EXPORT_SYMBOL(enable_kernel_vsx);
390
391 void flush_vsx_to_thread(struct task_struct *tsk)
392 {
393 if (tsk->thread.regs) {
394 preempt_disable();
395 if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
396 BUG_ON(tsk != current);
397 giveup_vsx(tsk);
398 }
399 preempt_enable();
400 }
401 }
402 EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
403
404 static int restore_vsx(struct task_struct *tsk)
405 {
406 if (cpu_has_feature(CPU_FTR_VSX)) {
407 tsk->thread.used_vsr = 1;
408 return 1;
409 }
410
411 return 0;
412 }
413 #else
414 static inline int restore_vsx(struct task_struct *tsk) { return 0; }
415 #endif
416
417 #ifdef CONFIG_SPE
418 void giveup_spe(struct task_struct *tsk)
419 {
420 check_if_tm_restore_required(tsk);
421
422 msr_check_and_set(MSR_SPE);
423 __giveup_spe(tsk);
424 msr_check_and_clear(MSR_SPE);
425 }
426 EXPORT_SYMBOL(giveup_spe);
427
428 void enable_kernel_spe(void)
429 {
430 WARN_ON(preemptible());
431
432 msr_check_and_set(MSR_SPE);
433
434 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
435 check_if_tm_restore_required(current);
436 __giveup_spe(current);
437 }
438 }
439 EXPORT_SYMBOL(enable_kernel_spe);
440
441 void flush_spe_to_thread(struct task_struct *tsk)
442 {
443 if (tsk->thread.regs) {
444 preempt_disable();
445 if (tsk->thread.regs->msr & MSR_SPE) {
446 BUG_ON(tsk != current);
447 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
448 giveup_spe(tsk);
449 }
450 preempt_enable();
451 }
452 }
453 #endif
454
455 static unsigned long msr_all_available;
456
457 static int __init init_msr_all_available(void)
458 {
459 #ifdef CONFIG_PPC_FPU
460 msr_all_available |= MSR_FP;
461 #endif
462 #ifdef CONFIG_ALTIVEC
463 if (cpu_has_feature(CPU_FTR_ALTIVEC))
464 msr_all_available |= MSR_VEC;
465 #endif
466 #ifdef CONFIG_VSX
467 if (cpu_has_feature(CPU_FTR_VSX))
468 msr_all_available |= MSR_VSX;
469 #endif
470 #ifdef CONFIG_SPE
471 if (cpu_has_feature(CPU_FTR_SPE))
472 msr_all_available |= MSR_SPE;
473 #endif
474
475 return 0;
476 }
477 early_initcall(init_msr_all_available);
478
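/*
 * Give up every math facility (FP, VMX, SPE) the task currently has live in
 * the CPU, saving the register state into its thread_struct.
 */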
479 void giveup_all(struct task_struct *tsk)
480 {
481 unsigned long usermsr;
482
483 if (!tsk->thread.regs)
484 return;
485
486 check_if_tm_restore_required(tsk);
487
488 usermsr = tsk->thread.regs->msr;
489
490 if ((usermsr & msr_all_available) == 0)
491 return;
492
493 msr_check_and_set(msr_all_available);
494
495 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
496
497 #ifdef CONFIG_PPC_FPU
498 if (usermsr & MSR_FP)
499 __giveup_fpu(tsk);
500 #endif
501 #ifdef CONFIG_ALTIVEC
502 if (usermsr & MSR_VEC)
503 __giveup_altivec(tsk);
504 #endif
505 #ifdef CONFIG_SPE
506 if (usermsr & MSR_SPE)
507 __giveup_spe(tsk);
508 #endif
509
510 msr_check_and_clear(msr_all_available);
511 }
512 EXPORT_SYMBOL(giveup_all);
513
514
515
516
517
518
519
520
521
522
523
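/*
 * Reload FP/VMX/VSX state for the current thread if it has been using the
 * facilities (or a transaction is active), and set the matching facility
 * bits in the saved MSR so the registers stay available to userspace.
 */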
524 void notrace restore_math(struct pt_regs *regs)
525 {
526 unsigned long msr;
527
528 if (!MSR_TM_ACTIVE(regs->msr) &&
529 !current->thread.load_fp && !loadvec(current->thread))
530 return;
531
532 msr = regs->msr;
533 msr_check_and_set(msr_all_available);
534
535
536
537
538
539 if ((!(msr & MSR_FP)) && restore_fp(current))
540 msr |= MSR_FP | current->thread.fpexc_mode;
541
542 if ((!(msr & MSR_VEC)) && restore_altivec(current))
543 msr |= MSR_VEC;
544
545 if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
546 restore_vsx(current)) {
547 msr |= MSR_VSX;
548 }
549
550 msr_check_and_clear(msr_all_available);
551
552 regs->msr = msr;
553 }
554
555 static void save_all(struct task_struct *tsk)
556 {
557 unsigned long usermsr;
558
559 if (!tsk->thread.regs)
560 return;
561
562 usermsr = tsk->thread.regs->msr;
563
564 if ((usermsr & msr_all_available) == 0)
565 return;
566
567 msr_check_and_set(msr_all_available);
568
569 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
570
571 if (usermsr & MSR_FP)
572 save_fpu(tsk);
573
574 if (usermsr & MSR_VEC)
575 save_altivec(tsk);
576
577 if (usermsr & MSR_SPE)
578 __giveup_spe(tsk);
579
580 msr_check_and_clear(msr_all_available);
581 thread_pkey_regs_save(&tsk->thread);
582 }
583
584 void flush_all_to_thread(struct task_struct *tsk)
585 {
586 if (tsk->thread.regs) {
587 preempt_disable();
588 BUG_ON(tsk != current);
589 #ifdef CONFIG_SPE
590 if (tsk->thread.regs->msr & MSR_SPE)
591 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
592 #endif
593 save_all(tsk);
594
595 preempt_enable();
596 }
597 }
598 EXPORT_SYMBOL(flush_all_to_thread);
599
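/*
 * Hardware data-breakpoint match handlers.  Both variants give the die
 * notifier chain (and an attached debugger) a chance to consume the event
 * before the hit is reported to the task as a trap.
 */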
600 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
601 void do_send_trap(struct pt_regs *regs, unsigned long address,
602 unsigned long error_code, int breakpt)
603 {
604 current->thread.trap_nr = TRAP_HWBKPT;
605 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
606 11, SIGSEGV) == NOTIFY_STOP)
607 return;
608
609
610 force_sig_ptrace_errno_trap(breakpt,
611 (void __user *)address);
612 }
613 #else
614 void do_break (struct pt_regs *regs, unsigned long address,
615 unsigned long error_code)
616 {
617 current->thread.trap_nr = TRAP_HWBKPT;
618 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
619 11, SIGSEGV) == NOTIFY_STOP)
620 return;
621
622 if (debugger_break_match(regs))
623 return;
624
625
626 hw_breakpoint_disable();
627
628
629 force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)address);
630 }
631 #endif
632
633 static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
634
635 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
636
637
638
639 static void set_debug_reg_defaults(struct thread_struct *thread)
640 {
641 thread->debug.iac1 = thread->debug.iac2 = 0;
642 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
643 thread->debug.iac3 = thread->debug.iac4 = 0;
644 #endif
645 thread->debug.dac1 = thread->debug.dac2 = 0;
646 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
647 thread->debug.dvc1 = thread->debug.dvc2 = 0;
648 #endif
649 thread->debug.dbcr0 = 0;
650 #ifdef CONFIG_BOOKE
651
652
653
654 thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
655 DBCR1_IAC3US | DBCR1_IAC4US;
656
657
658
659
660 thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
661 #else
662 thread->debug.dbcr1 = 0;
663 #endif
664 }
665
666 static void prime_debug_regs(struct debug_reg *debug)
667 {
668
669
670
671
672
673 mtmsr(mfmsr() & ~MSR_DE);
674
675 mtspr(SPRN_IAC1, debug->iac1);
676 mtspr(SPRN_IAC2, debug->iac2);
677 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
678 mtspr(SPRN_IAC3, debug->iac3);
679 mtspr(SPRN_IAC4, debug->iac4);
680 #endif
681 mtspr(SPRN_DAC1, debug->dac1);
682 mtspr(SPRN_DAC2, debug->dac2);
683 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
684 mtspr(SPRN_DVC1, debug->dvc1);
685 mtspr(SPRN_DVC2, debug->dvc2);
686 #endif
687 mtspr(SPRN_DBCR0, debug->dbcr0);
688 mtspr(SPRN_DBCR1, debug->dbcr1);
689 #ifdef CONFIG_BOOKE
690 mtspr(SPRN_DBCR2, debug->dbcr2);
691 #endif
692 }
693
694
695
696
697
698 void switch_booke_debug_regs(struct debug_reg *new_debug)
699 {
700 if ((current->thread.debug.dbcr0 & DBCR0_IDM)
701 || (new_debug->dbcr0 & DBCR0_IDM))
702 prime_debug_regs(new_debug);
703 }
704 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
705 #else
706 #ifndef CONFIG_HAVE_HW_BREAKPOINT
707 static void set_breakpoint(struct arch_hw_breakpoint *brk)
708 {
709 preempt_disable();
710 __set_breakpoint(brk);
711 preempt_enable();
712 }
713
714 static void set_debug_reg_defaults(struct thread_struct *thread)
715 {
716 thread->hw_brk.address = 0;
717 thread->hw_brk.type = 0;
718 if (ppc_breakpoint_available())
719 set_breakpoint(&thread->hw_brk);
720 }
721 #endif
722 #endif
723
724 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
725 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
726 {
727 mtspr(SPRN_DAC1, dabr);
728 #ifdef CONFIG_PPC_47x
729 isync();
730 #endif
731 return 0;
732 }
733 #elif defined(CONFIG_PPC_BOOK3S)
734 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
735 {
736 mtspr(SPRN_DABR, dabr);
737 if (cpu_has_feature(CPU_FTR_DABRX))
738 mtspr(SPRN_DABRX, dabrx);
739 return 0;
740 }
741 #elif defined(CONFIG_PPC_8xx)
742 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
743 {
744 unsigned long addr = dabr & ~HW_BRK_TYPE_DABR;
745 unsigned long lctrl1 = 0x90000000;
746 unsigned long lctrl2 = 0x8e000002;
747
748 if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
749 lctrl1 |= 0xa0000;
750 else if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
751 lctrl1 |= 0xf0000;
752 else if ((dabr & HW_BRK_TYPE_RDWR) == 0)
753 lctrl2 = 0;
754
755 mtspr(SPRN_LCTRL2, 0);
756 mtspr(SPRN_CMPE, addr);
757 mtspr(SPRN_CMPF, addr + 4);
758 mtspr(SPRN_LCTRL1, lctrl1);
759 mtspr(SPRN_LCTRL2, lctrl2);
760
761 return 0;
762 }
763 #else
764 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
765 {
766 return -EINVAL;
767 }
768 #endif
769
770 static inline int set_dabr(struct arch_hw_breakpoint *brk)
771 {
772 unsigned long dabr, dabrx;
773
774 dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
775 dabrx = ((brk->type >> 3) & 0x7);
776
777 if (ppc_md.set_dabr)
778 return ppc_md.set_dabr(dabr, dabrx);
779
780 return __set_dabr(dabr, dabrx);
781 }
782
783 void __set_breakpoint(struct arch_hw_breakpoint *brk)
784 {
785 memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
786
787 if (dawr_enabled())
788
789 set_dawr(brk);
790 else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
791
792 set_dabr(brk);
793 else
794
795 WARN_ON_ONCE(1);
796 }
797
798
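/*
 * A hardware data breakpoint is available when the DAWR is usable, or on
 * CPUs without ARCH_207S via the older DABR; ARCH_207S CPUs with the DAWR
 * disabled have no supported breakpoint register.
 */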
799 bool ppc_breakpoint_available(void)
800 {
801 if (dawr_enabled())
802 return true;
803 if (cpu_has_feature(CPU_FTR_ARCH_207S))
804 return false;
805
806 return true;
807 }
808 EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
809
810 static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
811 struct arch_hw_breakpoint *b)
812 {
813 if (a->address != b->address)
814 return false;
815 if (a->type != b->type)
816 return false;
817 if (a->len != b->len)
818 return false;
819 return true;
820 }
821
822 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
823
824 static inline bool tm_enabled(struct task_struct *tsk)
825 {
826 return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
827 }
828
829 static void tm_reclaim_thread(struct thread_struct *thr, uint8_t cause)
830 {
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846 if (!MSR_TM_SUSPENDED(mfmsr()))
847 return;
848
849 giveup_all(container_of(thr, struct task_struct, thread));
850
851 tm_reclaim(thr, cause);
852
853
854
855
856
857
858
859
860
861
862
863
864
865 if ((thr->ckpt_regs.msr & MSR_FP) == 0)
866 memcpy(&thr->ckfp_state, &thr->fp_state,
867 sizeof(struct thread_fp_state));
868 if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
869 memcpy(&thr->ckvr_state, &thr->vr_state,
870 sizeof(struct thread_vr_state));
871 }
872
873 void tm_reclaim_current(uint8_t cause)
874 {
875 tm_enable();
876 tm_reclaim_thread(&current->thread, cause);
877 }
878
879 static inline void tm_reclaim_task(struct task_struct *tsk)
880 {
881
882
883
884
885
886
887
888
889
890
891 struct thread_struct *thr = &tsk->thread;
892
893 if (!thr->regs)
894 return;
895
896 if (!MSR_TM_ACTIVE(thr->regs->msr))
897 goto out_and_saveregs;
898
899 WARN_ON(tm_suspend_disabled);
900
901 TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
902 "ccr=%lx, msr=%lx, trap=%lx)\n",
903 tsk->pid, thr->regs->nip,
904 thr->regs->ccr, thr->regs->msr,
905 thr->regs->trap);
906
907 tm_reclaim_thread(thr, TM_CAUSE_RESCHED);
908
909 TM_DEBUG("--- tm_reclaim on pid %d complete\n",
910 tsk->pid);
911
912 out_and_saveregs:
913
914
915
916
917
918 tm_save_sprs(thr);
919 }
920
921 extern void __tm_recheckpoint(struct thread_struct *thread);
922
923 void tm_recheckpoint(struct thread_struct *thread)
924 {
925 unsigned long flags;
926
927 if (!(thread->regs->msr & MSR_TM))
928 return;
929
930
931
932
933
934 local_irq_save(flags);
935 hard_irq_disable();
936
937
938
939
940 tm_restore_sprs(thread);
941
942 __tm_recheckpoint(thread);
943
944 local_irq_restore(flags);
945 }
946
947 static inline void tm_recheckpoint_new_task(struct task_struct *new)
948 {
949 if (!cpu_has_feature(CPU_FTR_TM))
950 return;
951
952
953
954
955
956
957
958
959
960 if (!tm_enabled(new))
961 return;
962
963 if (!MSR_TM_ACTIVE(new->thread.regs->msr)){
964 tm_restore_sprs(&new->thread);
965 return;
966 }
967
968 TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n",
969 new->pid, new->thread.regs->msr);
970
971 tm_recheckpoint(&new->thread);
972
973
974
975
976
977
978 new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);
979
980 TM_DEBUG("*** tm_recheckpoint of pid %d complete "
981 "(kernel msr 0x%lx)\n",
982 new->pid, mfmsr());
983 }
984
985 static inline void __switch_to_tm(struct task_struct *prev,
986 struct task_struct *new)
987 {
988 if (cpu_has_feature(CPU_FTR_TM)) {
989 if (tm_enabled(prev) || tm_enabled(new))
990 tm_enable();
991
992 if (tm_enabled(prev)) {
993 prev->thread.load_tm++;
994 tm_reclaim_task(prev);
995 if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
996 prev->thread.regs->msr &= ~MSR_TM;
997 }
998
999 tm_recheckpoint_new_task(new);
1000 }
1001 }
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
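/*
 * Re-enable the math facilities that were on at the transaction's
 * checkpoint but are currently off in the live MSR: mark them for loading,
 * call restore_math(), and fold the bits back into regs->msr.  Clears
 * TIF_RESTORE_TM and does nothing when no transaction is active.
 */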
1017 void restore_tm_state(struct pt_regs *regs)
1018 {
1019 unsigned long msr_diff;
1020
1021
1022
1023
1024
1025
1026
1027 clear_thread_flag(TIF_RESTORE_TM);
1028 if (!MSR_TM_ACTIVE(regs->msr))
1029 return;
1030
1031 msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
1032 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
1033
1034
1035 if (msr_diff & MSR_FP)
1036 current->thread.load_fp = 1;
1037 #ifdef CONFIG_ALTIVEC
1038 if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
1039 current->thread.load_vec = 1;
1040 #endif
1041 restore_math(regs);
1042
1043 regs->msr |= msr_diff;
1044 }
1045
1046 #else
1047 #define tm_recheckpoint_new_task(new)
1048 #define __switch_to_tm(prev, new)
1049 #endif
1050
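/*
 * save_sprs()/restore_sprs() handle the per-thread SPRs (VRSAVE, DSCR, the
 * EBB registers, FSCR, TAR, TIDR) across a context switch; restore_sprs()
 * only writes an SPR when the new thread's value differs from the old one.
 */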
1051 static inline void save_sprs(struct thread_struct *t)
1052 {
1053 #ifdef CONFIG_ALTIVEC
1054 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1055 t->vrsave = mfspr(SPRN_VRSAVE);
1056 #endif
1057 #ifdef CONFIG_PPC_BOOK3S_64
1058 if (cpu_has_feature(CPU_FTR_DSCR))
1059 t->dscr = mfspr(SPRN_DSCR);
1060
1061 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1062 t->bescr = mfspr(SPRN_BESCR);
1063 t->ebbhr = mfspr(SPRN_EBBHR);
1064 t->ebbrr = mfspr(SPRN_EBBRR);
1065
1066 t->fscr = mfspr(SPRN_FSCR);
1067
1068
1069
1070
1071
1072
1073
1074 t->tar = mfspr(SPRN_TAR);
1075 }
1076 #endif
1077
1078 thread_pkey_regs_save(t);
1079 }
1080
1081 static inline void restore_sprs(struct thread_struct *old_thread,
1082 struct thread_struct *new_thread)
1083 {
1084 #ifdef CONFIG_ALTIVEC
1085 if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
1086 old_thread->vrsave != new_thread->vrsave)
1087 mtspr(SPRN_VRSAVE, new_thread->vrsave);
1088 #endif
1089 #ifdef CONFIG_PPC_BOOK3S_64
1090 if (cpu_has_feature(CPU_FTR_DSCR)) {
1091 u64 dscr = get_paca()->dscr_default;
1092 if (new_thread->dscr_inherit)
1093 dscr = new_thread->dscr;
1094
1095 if (old_thread->dscr != dscr)
1096 mtspr(SPRN_DSCR, dscr);
1097 }
1098
1099 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1100 if (old_thread->bescr != new_thread->bescr)
1101 mtspr(SPRN_BESCR, new_thread->bescr);
1102 if (old_thread->ebbhr != new_thread->ebbhr)
1103 mtspr(SPRN_EBBHR, new_thread->ebbhr);
1104 if (old_thread->ebbrr != new_thread->ebbrr)
1105 mtspr(SPRN_EBBRR, new_thread->ebbrr);
1106
1107 if (old_thread->fscr != new_thread->fscr)
1108 mtspr(SPRN_FSCR, new_thread->fscr);
1109
1110 if (old_thread->tar != new_thread->tar)
1111 mtspr(SPRN_TAR, new_thread->tar);
1112 }
1113
1114 if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
1115 old_thread->tidr != new_thread->tidr)
1116 mtspr(SPRN_TIDR, new_thread->tidr);
1117 #endif
1118
1119 thread_pkey_regs_restore(new_thread, old_thread);
1120 }
1121
1122 struct task_struct *__switch_to(struct task_struct *prev,
1123 struct task_struct *new)
1124 {
1125 struct thread_struct *new_thread, *old_thread;
1126 struct task_struct *last;
1127 #ifdef CONFIG_PPC_BOOK3S_64
1128 struct ppc64_tlb_batch *batch;
1129 #endif
1130
1131 new_thread = &new->thread;
1132 old_thread = &current->thread;
1133
1134 WARN_ON(!irqs_disabled());
1135
1136 #ifdef CONFIG_PPC_BOOK3S_64
1137 batch = this_cpu_ptr(&ppc64_tlb_batch);
1138 if (batch->active) {
1139 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
1140 if (batch->index)
1141 __flush_tlb_pending(batch);
1142 batch->active = 0;
1143 }
1144 #endif
1145
1146 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1147 switch_booke_debug_regs(&new->thread.debug);
1148 #else
1149
1150
1151
1152
1153 #ifndef CONFIG_HAVE_HW_BREAKPOINT
1154 if (unlikely(!hw_brk_match(this_cpu_ptr(¤t_brk), &new->thread.hw_brk)))
1155 __set_breakpoint(&new->thread.hw_brk);
1156 #endif
1157 #endif
1158
1159
1160
1161
1162
1163 save_sprs(&prev->thread);
1164
1165
1166 giveup_all(prev);
1167
1168 __switch_to_tm(prev, new);
1169
1170 if (!radix_enabled()) {
1171
1172
1173
1174
1175
1176 hard_irq_disable();
1177 }
1178
1179
1180
1181
1182
1183
1184
1185
1186 restore_sprs(old_thread, new_thread);
1187
1188 last = _switch(old_thread, new_thread);
1189
1190 #ifdef CONFIG_PPC_BOOK3S_64
1191 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
1192 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
1193 batch = this_cpu_ptr(&ppc64_tlb_batch);
1194 batch->active = 1;
1195 }
1196
1197 if (current->thread.regs) {
1198 restore_math(current->thread.regs);
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208 if (current->thread.used_vas)
1209 asm volatile(PPC_CP_ABORT);
1210 }
1211 #endif
1212
1213 return last;
1214 }
1215
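/*
 * show_instructions()/show_user_instructions() dump NR_INSN_TO_PRINT
 * instruction words around regs->nip, printing XXXXXXXX for words that
 * cannot be read and bracketing the instruction at the NIP itself.
 */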
1216 #define NR_INSN_TO_PRINT 16
1217
1218 static void show_instructions(struct pt_regs *regs)
1219 {
1220 int i;
1221 unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
1222
1223 printk("Instruction dump:");
1224
1225 for (i = 0; i < NR_INSN_TO_PRINT; i++) {
1226 int instr;
1227
1228 if (!(i % 8))
1229 pr_cont("\n");
1230
1231 #if !defined(CONFIG_BOOKE)
1232
1233
1234
1235 if (!(regs->msr & MSR_IR))
1236 pc = (unsigned long)phys_to_virt(pc);
1237 #endif
1238
1239 if (!__kernel_text_address(pc) ||
1240 probe_kernel_address((const void *)pc, instr)) {
1241 pr_cont("XXXXXXXX ");
1242 } else {
1243 if (regs->nip == pc)
1244 pr_cont("<%08x> ", instr);
1245 else
1246 pr_cont("%08x ", instr);
1247 }
1248
1249 pc += sizeof(int);
1250 }
1251
1252 pr_cont("\n");
1253 }
1254
1255 void show_user_instructions(struct pt_regs *regs)
1256 {
1257 unsigned long pc;
1258 int n = NR_INSN_TO_PRINT;
1259 struct seq_buf s;
1260 char buf[96];
1261
1262 pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
1263
1264
1265
1266
1267
1268 if (!__access_ok(pc, NR_INSN_TO_PRINT * sizeof(int), USER_DS)) {
1269 pr_info("%s[%d]: Bad NIP, not dumping instructions.\n",
1270 current->comm, current->pid);
1271 return;
1272 }
1273
1274 seq_buf_init(&s, buf, sizeof(buf));
1275
1276 while (n) {
1277 int i;
1278
1279 seq_buf_clear(&s);
1280
1281 for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
1282 int instr;
1283
1284 if (probe_kernel_address((const void *)pc, instr)) {
1285 seq_buf_printf(&s, "XXXXXXXX ");
1286 continue;
1287 }
1288 seq_buf_printf(&s, regs->nip == pc ? "<%08x> " : "%08x ", instr);
1289 }
1290
1291 if (!seq_buf_has_overflowed(&s))
1292 pr_info("%s[%d]: code: %s\n", current->comm,
1293 current->pid, s.buffer);
1294 }
1295 }
1296
1297 struct regbit {
1298 unsigned long bit;
1299 const char *name;
1300 };
1301
1302 static struct regbit msr_bits[] = {
1303 #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
1304 {MSR_SF, "SF"},
1305 {MSR_HV, "HV"},
1306 #endif
1307 {MSR_VEC, "VEC"},
1308 {MSR_VSX, "VSX"},
1309 #ifdef CONFIG_BOOKE
1310 {MSR_CE, "CE"},
1311 #endif
1312 {MSR_EE, "EE"},
1313 {MSR_PR, "PR"},
1314 {MSR_FP, "FP"},
1315 {MSR_ME, "ME"},
1316 #ifdef CONFIG_BOOKE
1317 {MSR_DE, "DE"},
1318 #else
1319 {MSR_SE, "SE"},
1320 {MSR_BE, "BE"},
1321 #endif
1322 {MSR_IR, "IR"},
1323 {MSR_DR, "DR"},
1324 {MSR_PMM, "PMM"},
1325 #ifndef CONFIG_BOOKE
1326 {MSR_RI, "RI"},
1327 {MSR_LE, "LE"},
1328 #endif
1329 {0, NULL}
1330 };
1331
1332 static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
1333 {
1334 const char *s = "";
1335
1336 for (; bits->bit; ++bits)
1337 if (val & bits->bit) {
1338 pr_cont("%s%s", s, bits->name);
1339 s = sep;
1340 }
1341 }
1342
1343 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1344 static struct regbit msr_tm_bits[] = {
1345 {MSR_TS_T, "T"},
1346 {MSR_TS_S, "S"},
1347 {MSR_TM, "E"},
1348 {0, NULL}
1349 };
1350
1351 static void print_tm_bits(unsigned long val)
1352 {
1353
1354
1355
1356
1357
1358
1359
1360 if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
1361 pr_cont(",TM[");
1362 print_bits(val, msr_tm_bits, "");
1363 pr_cont("]");
1364 }
1365 }
1366 #else
1367 static void print_tm_bits(unsigned long val) {}
1368 #endif
1369
1370 static void print_msr_bits(unsigned long val)
1371 {
1372 pr_cont("<");
1373 print_bits(val, msr_bits, ",");
1374 print_tm_bits(val);
1375 pr_cont(">");
1376 }
1377
1378 #ifdef CONFIG_PPC64
1379 #define REG "%016lx"
1380 #define REGS_PER_LINE 4
1381 #define LAST_VOLATILE 13
1382 #else
1383 #define REG "%08lx"
1384 #define REGS_PER_LINE 8
1385 #define LAST_VOLATILE 12
1386 #endif
1387
1388 void show_regs(struct pt_regs * regs)
1389 {
1390 int i, trap;
1391
1392 show_regs_print_info(KERN_DEFAULT);
1393
1394 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
1395 regs->nip, regs->link, regs->ctr);
1396 printk("REGS: %px TRAP: %04lx %s (%s)\n",
1397 regs, regs->trap, print_tainted(), init_utsname()->release);
1398 printk("MSR: "REG" ", regs->msr);
1399 print_msr_bits(regs->msr);
1400 pr_cont(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
1401 trap = TRAP(regs);
1402 if ((TRAP(regs) != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
1403 pr_cont("CFAR: "REG" ", regs->orig_gpr3);
1404 if (trap == 0x200 || trap == 0x300 || trap == 0x600)
1405 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
1406 pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
1407 #else
1408 pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
1409 #endif
1410 #ifdef CONFIG_PPC64
1411 pr_cont("IRQMASK: %lx ", regs->softe);
1412 #endif
1413 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1414 if (MSR_TM_ACTIVE(regs->msr))
1415 pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
1416 #endif
1417
1418 for (i = 0; i < 32; i++) {
1419 if ((i % REGS_PER_LINE) == 0)
1420 pr_cont("\nGPR%02d: ", i);
1421 pr_cont(REG " ", regs->gpr[i]);
1422 if (i == LAST_VOLATILE && !FULL_REGS(regs))
1423 break;
1424 }
1425 pr_cont("\n");
1426 #ifdef CONFIG_KALLSYMS
1427
1428
1429
1430
1431 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
1432 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
1433 #endif
1434 show_stack(current, (unsigned long *) regs->gpr[1]);
1435 if (!user_mode(regs))
1436 show_instructions(regs);
1437 }
1438
1439 void flush_thread(void)
1440 {
1441 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1442 flush_ptrace_hw_breakpoint(current);
1443 #else
1444 set_debug_reg_defaults(&current->thread);
1445 #endif
1446 }
1447
1448 #ifdef CONFIG_PPC_BOOK3S_64
1449 void arch_setup_new_exec(void)
1450 {
1451 if (radix_enabled())
1452 return;
1453 hash__setup_new_exec();
1454 }
1455 #endif
1456
1457 int set_thread_uses_vas(void)
1458 {
1459 #ifdef CONFIG_PPC_BOOK3S_64
1460 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1461 return -EINVAL;
1462
1463 current->thread.used_vas = 1;
1464
1465
1466
1467
1468
1469
1470
1471
1472 asm volatile(PPC_CP_ABORT);
1473
1474 #endif
1475 return 0;
1476 }
1477
1478 #ifdef CONFIG_PPC64
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512 int set_thread_tidr(struct task_struct *t)
1513 {
1514 if (!cpu_has_feature(CPU_FTR_P9_TIDR))
1515 return -EINVAL;
1516
1517 if (t != current)
1518 return -EINVAL;
1519
1520 if (t->thread.tidr)
1521 return 0;
1522
1523 t->thread.tidr = (u16)task_pid_nr(t);
1524 mtspr(SPRN_TIDR, t->thread.tidr);
1525
1526 return 0;
1527 }
1528 EXPORT_SYMBOL_GPL(set_thread_tidr);
1529
1530 #endif
1531
1532 void
1533 release_thread(struct task_struct *t)
1534 {
1535 }
1536
1537
1538
1539
1540
1541 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
1542 {
1543 flush_all_to_thread(src);
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554 __switch_to_tm(src, src);
1555
1556 *dst = *src;
1557
1558 clear_task_ebb(dst);
1559
1560 return 0;
1561 }
1562
1563 static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
1564 {
1565 #ifdef CONFIG_PPC_BOOK3S_64
1566 unsigned long sp_vsid;
1567 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
1568
1569 if (radix_enabled())
1570 return;
1571
1572 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1573 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
1574 << SLB_VSID_SHIFT_1T;
1575 else
1576 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
1577 << SLB_VSID_SHIFT;
1578 sp_vsid |= SLB_VSID_KERNEL | llp;
1579 p->thread.ksp_vsid = sp_vsid;
1580 #endif
1581 }
1582
1583
1584
1585
1586
1587
1588
1589
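/*
 * Build the initial kernel stack and register state for a new task: kernel
 * threads get a zeroed pt_regs and start in ret_from_kernel_thread with the
 * function in gpr[14] and its argument in gpr[15]; user threads copy the
 * parent's pt_regs, take the new stack pointer and TLS value, and return
 * through ret_from_fork.
 */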
1590 int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
1591 unsigned long kthread_arg, struct task_struct *p,
1592 unsigned long tls)
1593 {
1594 struct pt_regs *childregs, *kregs;
1595 extern void ret_from_fork(void);
1596 extern void ret_from_kernel_thread(void);
1597 void (*f)(void);
1598 unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
1599 struct thread_info *ti = task_thread_info(p);
1600
1601 klp_init_thread_info(p);
1602
1603
1604 sp -= sizeof(struct pt_regs);
1605 childregs = (struct pt_regs *) sp;
1606 if (unlikely(p->flags & PF_KTHREAD)) {
1607
1608 memset(childregs, 0, sizeof(struct pt_regs));
1609 childregs->gpr[1] = sp + sizeof(struct pt_regs);
1610
1611 if (usp)
1612 childregs->gpr[14] = ppc_function_entry((void *)usp);
1613 #ifdef CONFIG_PPC64
1614 clear_tsk_thread_flag(p, TIF_32BIT);
1615 childregs->softe = IRQS_ENABLED;
1616 #endif
1617 childregs->gpr[15] = kthread_arg;
1618 p->thread.regs = NULL;
1619 ti->flags |= _TIF_RESTOREALL;
1620 f = ret_from_kernel_thread;
1621 } else {
1622
1623 struct pt_regs *regs = current_pt_regs();
1624 CHECK_FULL_REGS(regs);
1625 *childregs = *regs;
1626 if (usp)
1627 childregs->gpr[1] = usp;
1628 p->thread.regs = childregs;
1629 childregs->gpr[3] = 0;
1630 if (clone_flags & CLONE_SETTLS) {
1631 #ifdef CONFIG_PPC64
1632 if (!is_32bit_task())
1633 childregs->gpr[13] = tls;
1634 else
1635 #endif
1636 childregs->gpr[2] = tls;
1637 }
1638
1639 f = ret_from_fork;
1640 }
1641 childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
1642 sp -= STACK_FRAME_OVERHEAD;
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652 ((unsigned long *)sp)[0] = 0;
1653 sp -= sizeof(struct pt_regs);
1654 kregs = (struct pt_regs *) sp;
1655 sp -= STACK_FRAME_OVERHEAD;
1656 p->thread.ksp = sp;
1657 #ifdef CONFIG_PPC32
1658 p->thread.ksp_limit = (unsigned long)end_of_stack(p);
1659 #endif
1660 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1661 p->thread.ptrace_bps[0] = NULL;
1662 #endif
1663
1664 p->thread.fp_save_area = NULL;
1665 #ifdef CONFIG_ALTIVEC
1666 p->thread.vr_save_area = NULL;
1667 #endif
1668
1669 setup_ksp_vsid(p, sp);
1670
1671 #ifdef CONFIG_PPC64
1672 if (cpu_has_feature(CPU_FTR_DSCR)) {
1673 p->thread.dscr_inherit = current->thread.dscr_inherit;
1674 p->thread.dscr = mfspr(SPRN_DSCR);
1675 }
1676 if (cpu_has_feature(CPU_FTR_HAS_PPR))
1677 childregs->ppr = DEFAULT_PPR;
1678
1679 p->thread.tidr = 0;
1680 #endif
1681 kregs->nip = ppc_function_entry(f);
1682 return 0;
1683 }
1684
1685 void preload_new_slb_context(unsigned long start, unsigned long sp);
1686
1687
1688
1689
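/*
 * Set up the user register state for a freshly exec'ed program: clear the
 * GPRs and math state, point gpr[1] at the new stack and set the NIP (for
 * ELFv1 binaries the entry point and TOC come from the function descriptor
 * at 'start').
 */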
1690 void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1691 {
1692 #ifdef CONFIG_PPC64
1693 unsigned long load_addr = regs->gpr[2];
1694
1695 #ifdef CONFIG_PPC_BOOK3S_64
1696 if (!radix_enabled())
1697 preload_new_slb_context(start, sp);
1698 #endif
1699 #endif
1700
1701
1702
1703
1704
1705 if (!current->thread.regs) {
1706 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
1707 current->thread.regs = regs - 1;
1708 }
1709
1710 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1711
1712
1713
1714
1715
1716 if (MSR_TM_SUSPENDED(mfmsr()))
1717 tm_reclaim_current(0);
1718 #endif
1719
1720 memset(regs->gpr, 0, sizeof(regs->gpr));
1721 regs->ctr = 0;
1722 regs->link = 0;
1723 regs->xer = 0;
1724 regs->ccr = 0;
1725 regs->gpr[1] = sp;
1726
1727
1728
1729
1730
1731
1732 regs->trap &= ~1UL;
1733
1734 #ifdef CONFIG_PPC32
1735 regs->mq = 0;
1736 regs->nip = start;
1737 regs->msr = MSR_USER;
1738 #else
1739 if (!is_32bit_task()) {
1740 unsigned long entry;
1741
1742 if (is_elf2_task()) {
1743
1744 entry = start;
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754 regs->gpr[12] = start;
1755
1756 set_thread_flag(TIF_RESTOREALL);
1757 } else {
1758 unsigned long toc;
1759
1760
1761
1762
1763
1764
1765
1766 __get_user(entry, (unsigned long __user *)start);
1767 __get_user(toc, (unsigned long __user *)start+1);
1768
1769
1770
1771
1772 if (load_addr != 0) {
1773 entry += load_addr;
1774 toc += load_addr;
1775 }
1776 regs->gpr[2] = toc;
1777 }
1778 regs->nip = entry;
1779 regs->msr = MSR_USER64;
1780 } else {
1781 regs->nip = start;
1782 regs->gpr[2] = 0;
1783 regs->msr = MSR_USER32;
1784 }
1785 #endif
1786 #ifdef CONFIG_VSX
1787 current->thread.used_vsr = 0;
1788 #endif
1789 current->thread.load_slb = 0;
1790 current->thread.load_fp = 0;
1791 memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
1792 current->thread.fp_save_area = NULL;
1793 #ifdef CONFIG_ALTIVEC
1794 memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
1795 current->thread.vr_state.vscr.u[3] = 0x00010000;
1796 current->thread.vr_save_area = NULL;
1797 current->thread.vrsave = 0;
1798 current->thread.used_vr = 0;
1799 current->thread.load_vec = 0;
1800 #endif
1801 #ifdef CONFIG_SPE
1802 memset(current->thread.evr, 0, sizeof(current->thread.evr));
1803 current->thread.acc = 0;
1804 current->thread.spefscr = 0;
1805 current->thread.used_spe = 0;
1806 #endif
1807 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1808 current->thread.tm_tfhar = 0;
1809 current->thread.tm_texasr = 0;
1810 current->thread.tm_tfiar = 0;
1811 current->thread.load_tm = 0;
1812 #endif
1813
1814 thread_pkey_regs_init(&current->thread);
1815 }
1816 EXPORT_SYMBOL(start_thread);
1817
1818 #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1819 | PR_FP_EXC_RES | PR_FP_EXC_INV)
1820
1821 int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
1822 {
1823 struct pt_regs *regs = tsk->thread.regs;
1824
1825
1826
1827
1828
1829 if (val & PR_FP_EXC_SW_ENABLE) {
1830 #ifdef CONFIG_SPE
1831 if (cpu_has_feature(CPU_FTR_SPE)) {
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1845 tsk->thread.fpexc_mode = val &
1846 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
1847 return 0;
1848 } else {
1849 return -EINVAL;
1850 }
1851 #else
1852 return -EINVAL;
1853 #endif
1854 }
1855
1856
1857
1858
1859
1860
1861 if (val > PR_FP_EXC_PRECISE)
1862 return -EINVAL;
1863 tsk->thread.fpexc_mode = __pack_fe01(val);
1864 if (regs != NULL && (regs->msr & MSR_FP) != 0)
1865 regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
1866 | tsk->thread.fpexc_mode;
1867 return 0;
1868 }
1869
1870 int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
1871 {
1872 unsigned int val;
1873
1874 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
1875 #ifdef CONFIG_SPE
1876 if (cpu_has_feature(CPU_FTR_SPE)) {
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1890 val = tsk->thread.fpexc_mode;
1891 } else
1892 return -EINVAL;
1893 #else
1894 return -EINVAL;
1895 #endif
1896 else
1897 val = __unpack_fe01(tsk->thread.fpexc_mode);
1898 return put_user(val, (unsigned int __user *) adr);
1899 }
1900
1901 int set_endian(struct task_struct *tsk, unsigned int val)
1902 {
1903 struct pt_regs *regs = tsk->thread.regs;
1904
1905 if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
1906 (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
1907 return -EINVAL;
1908
1909 if (regs == NULL)
1910 return -EINVAL;
1911
1912 if (val == PR_ENDIAN_BIG)
1913 regs->msr &= ~MSR_LE;
1914 else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
1915 regs->msr |= MSR_LE;
1916 else
1917 return -EINVAL;
1918
1919 return 0;
1920 }
1921
1922 int get_endian(struct task_struct *tsk, unsigned long adr)
1923 {
1924 struct pt_regs *regs = tsk->thread.regs;
1925 unsigned int val;
1926
1927 if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
1928 !cpu_has_feature(CPU_FTR_REAL_LE))
1929 return -EINVAL;
1930
1931 if (regs == NULL)
1932 return -EINVAL;
1933
1934 if (regs->msr & MSR_LE) {
1935 if (cpu_has_feature(CPU_FTR_REAL_LE))
1936 val = PR_ENDIAN_LITTLE;
1937 else
1938 val = PR_ENDIAN_PPC_LITTLE;
1939 } else
1940 val = PR_ENDIAN_BIG;
1941
1942 return put_user(val, (unsigned int __user *)adr);
1943 }
1944
1945 int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
1946 {
1947 tsk->thread.align_ctl = val;
1948 return 0;
1949 }
1950
1951 int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
1952 {
1953 return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
1954 }
1955
1956 static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
1957 unsigned long nbytes)
1958 {
1959 unsigned long stack_page;
1960 unsigned long cpu = task_cpu(p);
1961
1962 stack_page = (unsigned long)hardirq_ctx[cpu];
1963 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
1964 return 1;
1965
1966 stack_page = (unsigned long)softirq_ctx[cpu];
1967 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
1968 return 1;
1969
1970 return 0;
1971 }
1972
1973 int validate_sp(unsigned long sp, struct task_struct *p,
1974 unsigned long nbytes)
1975 {
1976 unsigned long stack_page = (unsigned long)task_stack_page(p);
1977
1978 if (sp < THREAD_SIZE)
1979 return 0;
1980
1981 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
1982 return 1;
1983
1984 return valid_irq_stack(sp, p, nbytes);
1985 }
1986
1987 EXPORT_SYMBOL(validate_sp);
1988
1989 static unsigned long __get_wchan(struct task_struct *p)
1990 {
1991 unsigned long ip, sp;
1992 int count = 0;
1993
1994 if (!p || p == current || p->state == TASK_RUNNING)
1995 return 0;
1996
1997 sp = p->thread.ksp;
1998 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
1999 return 0;
2000
2001 do {
2002 sp = *(unsigned long *)sp;
2003 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
2004 p->state == TASK_RUNNING)
2005 return 0;
2006 if (count > 0) {
2007 ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
2008 if (!in_sched_functions(ip))
2009 return ip;
2010 }
2011 } while (count++ < 16);
2012 return 0;
2013 }
2014
2015 unsigned long get_wchan(struct task_struct *p)
2016 {
2017 unsigned long ret;
2018
2019 if (!try_get_task_stack(p))
2020 return 0;
2021
2022 ret = __get_wchan(p);
2023
2024 put_task_stack(p);
2025
2026 return ret;
2027 }
2028
2029 static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
2030
2031 void show_stack(struct task_struct *tsk, unsigned long *stack)
2032 {
2033 unsigned long sp, ip, lr, newsp;
2034 int count = 0;
2035 int firstframe = 1;
2036 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2037 unsigned long ret_addr;
2038 int ftrace_idx = 0;
2039 #endif
2040
2041 if (tsk == NULL)
2042 tsk = current;
2043
2044 if (!try_get_task_stack(tsk))
2045 return;
2046
2047 sp = (unsigned long) stack;
2048 if (sp == 0) {
2049 if (tsk == current)
2050 sp = current_stack_pointer();
2051 else
2052 sp = tsk->thread.ksp;
2053 }
2054
2055 lr = 0;
2056 printk("Call Trace:\n");
2057 do {
2058 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
2059 break;
2060
2061 stack = (unsigned long *) sp;
2062 newsp = stack[0];
2063 ip = stack[STACK_FRAME_LR_SAVE];
2064 if (!firstframe || ip != lr) {
2065 printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2066 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2067 ret_addr = ftrace_graph_ret_addr(current,
2068 &ftrace_idx, ip, stack);
2069 if (ret_addr != ip)
2070 pr_cont(" (%pS)", (void *)ret_addr);
2071 #endif
2072 if (firstframe)
2073 pr_cont(" (unreliable)");
2074 pr_cont("\n");
2075 }
2076 firstframe = 0;
2077
2078
2079
2080
2081
2082 if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
2083 && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
2084 struct pt_regs *regs = (struct pt_regs *)
2085 (sp + STACK_FRAME_OVERHEAD);
2086 lr = regs->link;
2087 printk("--- interrupt: %lx at %pS\n LR = %pS\n",
2088 regs->trap, (void *)regs->nip, (void *)lr);
2089 firstframe = 1;
2090 }
2091
2092 sp = newsp;
2093 } while (count++ < kstack_depth_to_print);
2094
2095 put_task_stack(tsk);
2096 }
2097
2098 #ifdef CONFIG_PPC64
2099
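/*
 * The runlatch tells the hardware whether this thread is doing useful work.
 * These helpers set or clear CTRL[RUNLATCH] and mirror the state in
 * thread_info->local_flags via _TLF_RUNLATCH.
 */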
2100 void notrace __ppc64_runlatch_on(void)
2101 {
2102 struct thread_info *ti = current_thread_info();
2103
2104 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2105
2106
2107
2108
2109
2110 mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
2111 } else {
2112 unsigned long ctrl;
2113
2114
2115
2116
2117
2118 ctrl = mfspr(SPRN_CTRLF);
2119 ctrl |= CTRL_RUNLATCH;
2120 mtspr(SPRN_CTRLT, ctrl);
2121 }
2122
2123 ti->local_flags |= _TLF_RUNLATCH;
2124 }
2125
2126
2127 void notrace __ppc64_runlatch_off(void)
2128 {
2129 struct thread_info *ti = current_thread_info();
2130
2131 ti->local_flags &= ~_TLF_RUNLATCH;
2132
2133 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2134 mtspr(SPRN_CTRLT, 0);
2135 } else {
2136 unsigned long ctrl;
2137
2138 ctrl = mfspr(SPRN_CTRLF);
2139 ctrl &= ~CTRL_RUNLATCH;
2140 mtspr(SPRN_CTRLT, ctrl);
2141 }
2142 }
2143 #endif
2144
2145 unsigned long arch_align_stack(unsigned long sp)
2146 {
2147 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2148 sp -= get_random_int() & ~PAGE_MASK;
2149 return sp & ~0xf;
2150 }
2151
2152 static inline unsigned long brk_rnd(void)
2153 {
2154 unsigned long rnd = 0;
2155
2156
2157 if (is_32bit_task())
2158 rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
2159 else
2160 rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
2161
2162 return rnd << PAGE_SHIFT;
2163 }
2164
2165 unsigned long arch_randomize_brk(struct mm_struct *mm)
2166 {
2167 unsigned long base = mm->brk;
2168 unsigned long ret;
2169
2170 #ifdef CONFIG_PPC_BOOK3S_64
2171
2172
2173
2174
2175
2176
2177
2178
2179 if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2180 base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2181 #endif
2182
2183 ret = PAGE_ALIGN(base + brk_rnd());
2184
2185 if (ret < mm->brk)
2186 return mm->brk;
2187
2188 return ret;
2189 }
2190