/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>

asmlinkage extern void ret_from_fork(void);

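/*
 * Per-CPU scratch slot for the 64-bit SYSCALL entry path (see entry_64.S),
 * which stashes the user stack pointer here while switching to the kernel
 * stack.
 */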
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);

/* Also prints some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
	       regs->sp, regs->flags);
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = __read_cr4();

	printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
	       es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
	       cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))
		return;

	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);

}

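/*
 * Final per-task teardown: a dying task must not still own an LDT at this
 * point, so sanity-check and complain loudly if it does.
 */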
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		if (dead_task->mm->context.ldt) {
			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt,
				dead_task->mm->context.ldt->size);
			BUG();
		}
#endif
	}
}

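/* Install a 32-bit TLS descriptor with the given base into slot @tls. */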
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}

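/*
 * Set up the child's kernel stack, pt_regs and segment state for
 * fork()/clone().  Kernel threads get a minimal frame instead: the
 * function to call is stashed in bx and its argument in bp, which
 * ret_from_fork picks up.
 */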
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p, unsigned long tls)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	childregs = task_pt_regs(p);
	p->thread.sp = (unsigned long) childregs;
	set_tsk_thread_flag(p, TIF_FORK);
	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
	savesegment(fs, p->thread.fsindex);
	p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->sp = (unsigned long)childregs;
		childregs->ss = __KERNEL_DS;
		childregs->bx = sp; /* function */
		childregs->bp = arg;
		childregs->orig_ax = -1;
		childregs->cs = __KERNEL_CS | get_kernel_rpl();
		childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
		return 0;
	}
	*childregs = *current_pt_regs();

	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	err = -ENOMEM;
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (is_ia32_task())
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)tls, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, tls);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}

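/*
 * Reset the segment registers and user frame so that execve() starts the
 * new program at @new_ip with stack @new_sp.
 */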
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	regs->ip = new_ip;
	regs->sp = new_sp;
	regs->cs = _cs;
	regs->ss = _ss;
	regs->flags = X86_EFLAGS_IF;
	force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}

#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer is not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
	unsigned fsindex, gsindex;
	fpu_switch_t fpu_switch;

	fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here. This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them, and it must be
	 * done before fpu__restore(), so the TS bit is up to date.
	 */
	arch_end_context_switch(next_p);

	/* Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT. The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/*
	 * Switch FS and GS.
	 *
	 * These are even more complicated than DS and ES: they have
	 * 64-bit bases that are controlled by arch_prctl. Those bases
	 * only differ from the values in the GDT or LDT if the selector
	 * is 0.
	 *
	 * Loading the segment register resets the hidden base part of
	 * the register to 0 or the value from the GDT / LDT. If the
	 * next base address is zero, writing 0 to the segment register
	 * is much faster than using wrmsr to explicitly zero the base.
	 *
	 * The thread_struct.fs and thread_struct.gs values are 0
	 * if the fs and gs bases respectively are not overridden
	 * from the values implied by fsindex and gsindex. They
	 * are nonzero, and store the nonzero base addresses, if
	 * the bases are overridden.
	 *
	 * (fs != 0 && fsindex != 0) || (gs != 0 && gsindex != 0) should
	 * be impossible.
	 *
	 * Therefore we need to reload the segment registers if either
	 * the old or new selector is nonzero, and we need to override
	 * the base address if the next thread expects it to be overridden.
	 *
	 * This code is unnecessarily slow in the case where the old and
	 * new indexes are zero and the new base is nonzero -- it will
	 * unnecessarily write 0 to the selector before writing the new
	 * base address.
	 *
	 * Note: This all depends on arch_prctl being the only way that
	 * user code can override the segment base. Once wrfsbase and
	 * wrgsbase are enabled, most of this code will need to change.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);

		/*
		 * If user code wrote a nonzero value to FS, then it also
		 * cleared the overridden base address.
		 *
		 * XXX: if user code wrote 0 to FS and cleared the base
		 * address itself, we won't notice and we'll incorrectly
		 * restore the prior base address next time we reschedule
		 * the process.
		 */
		if (fsindex)
			prev->fs = 0;
	}
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);

		/* This works (and fails) the same way as fsindex above. */
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	switch_fpu_finish(next_fpu, fpu_switch);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	this_cpu_write(current_task, next_p);

	/* Reload esp0 and ss1. This changes current_thread_info(). */
	load_sp0(tss, next);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

#ifdef CONFIG_XEN
	/*
	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
	 * current_pt_regs()->flags may not match the current task's
	 * intended IOPL. We need to switch it manually.
	 */
	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
		     prev->iopl != next->iopl))
		xen_set_iopl_mask(next->iopl);
#endif

	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
		 * does not update the cached descriptor. As a result, if we
		 * do SYSRET while SS is NULL, we'll end up in user mode with
		 * SS apparently equal to __USER_DS but actually unusable.
		 *
		 * The straightforward workaround would be to fix it up just
		 * before SYSRET, but that would slow down the system call
		 * fast paths. Instead, we ensure that SS is never NULL in
		 * system call context. We do this by replacing NULL SS
		 * selectors at every context switch. SYSCALL sets up a valid
		 * SS, so the only way to get NULL is to re-enter the kernel
		 * from CPL 3 through an interrupt. Since that can't happen
		 * in the same task as a running syscall, we are guaranteed to
		 * context switch between every interrupt vector entry and a
		 * subsequent SYSRET.
		 *
		 * We read SS first because SS reads are much faster than
		 * writes. Out of caution, we force SS to __KERNEL_DS even if
		 * it previously had a different non-NULL value.
		 */
		unsigned short ss_sel;
		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}

	return prev_p;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(bool x32)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	/* Mark the associated mm as containing 32-bit tasks. */
	if (x32) {
		clear_thread_flag(TIF_IA32);
		set_thread_flag(TIF_X32);
		if (current->mm)
			current->mm->context.ia32_compat = TIF_X32;
		current->personality &= ~READ_IMPLIES_EXEC;
		/* is_compat_task() uses the presence of the x32
		   syscall bit flag to determine compat status */
		current_thread_info()->status &= ~TS_COMPAT;
	} else {
		set_thread_flag(TIF_IA32);
		clear_thread_flag(TIF_X32);
		if (current->mm)
			current->mm->context.ia32_compat = TIF_IA32;
		current->personality |= force_personality32;
		/* Prepare the first "return" to user space */
		current_thread_info()->status |= TS_COMPAT;
	}
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

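/*
 * Set or read a task's 64-bit FS/GS base.  This is where the arch_prctl()
 * system call ends up, e.g. arch_prctl(ARCH_SET_FS, base) from user space;
 * it is also used above when a 64-bit child is created with CLONE_SETTLS.
 */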
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = wrmsrl_safe(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

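/* arch_prctl() syscall entry point: operate on the current task. */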
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}

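/* Report the task's saved user stack pointer (used e.g. for /proc/<pid>/stat). */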
unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}
