Lines matching refs:regs — identifier cross-reference hits for "regs" (the functions shown come from the x86 virtual-8086 mode code, arch/x86/kernel/vm86_32.c in kernels of this vintage)

69 #define KVM86	((struct kernel_vm86_struct *)regs)
76 #define AL(regs) (((unsigned char *)&((regs)->pt.ax))[0]) argument
77 #define AH(regs) (((unsigned char *)&((regs)->pt.ax))[1]) argument
78 #define IP(regs) (*(unsigned short *)&((regs)->pt.ip)) argument
79 #define SP(regs) (*(unsigned short *)&((regs)->pt.sp)) argument
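
These four macros exploit x86's little-endian layout: taking the address of the long-sized ax/ip/sp slots in pt_regs and re-casting it yields the 8-bit (AL/AH) or 16-bit (IP/SP) real-mode view of the register. A minimal user-space sketch of the same trick; fake_vm86_regs is a stand-in, not the kernel's layout:

    #include <stdio.h>

    struct fake_pt_regs { unsigned long ax, ip, sp; };
    struct fake_vm86_regs { struct fake_pt_regs pt; };

    #define AL(regs) (((unsigned char *)&((regs)->pt.ax))[0])
    #define AH(regs) (((unsigned char *)&((regs)->pt.ax))[1])
    #define IP(regs) (*(unsigned short *)&((regs)->pt.ip))
    #define SP(regs) (*(unsigned short *)&((regs)->pt.sp))

    int main(void)
    {
        struct fake_vm86_regs r = { { 0x1234, 0xdeadbeef, 0xcafe0042 } };
        /* little-endian x86: prints AL=34 AH=12 IP=beef SP=0042 */
        printf("AL=%02x AH=%02x IP=%04x SP=%04x\n",
               AL(&r), AH(&r), IP(&r), SP(&r));
        return 0;
    }
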
95 const struct kernel_vm86_regs *regs) in copy_vm86_regs_to_user() argument
103 ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax)); in copy_vm86_regs_to_user()
104 ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax, in copy_vm86_regs_to_user()
112 static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs, in copy_vm86_regs_from_user() argument
119 ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax)); in copy_vm86_regs_from_user()
121 ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax, in copy_vm86_regs_from_user()
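
Both copy helpers split the transfer at offsetof(struct kernel_vm86_regs, pt.orig_ax): fields up to orig_ax line up one-to-one with the user-visible struct vm86_regs, and everything from orig_ax onward is re-anchored at the user struct's orig_eax (the from_user variant additionally takes an extra byte count for data following the regs). A hedged sketch of the pattern with simplified stand-in structs and memcpy in place of copy_to_user():

    #include <stddef.h>
    #include <string.h>

    /* Stand-ins only; the real structs carry the full register set. */
    struct k_regs { struct { unsigned long ax, ip, orig_ax, flags; } pt;
                    unsigned short es, ds, fs, gs; };
    struct u_regs { unsigned long eax, eip, orig_eax, eflags;
                    unsigned short es, ds, fs, gs; };

    static int copy_regs_to_user(struct u_regs *user, const struct k_regs *regs)
    {
        /* Chunk 1: fields before orig_ax, identical layout in both structs. */
        memcpy(user, regs, offsetof(struct k_regs, pt.orig_ax));
        /* Chunk 2: orig_ax through the end, re-anchored at user->orig_eax. */
        memcpy(&user->orig_eax, &regs->pt.orig_ax,
               sizeof(struct k_regs) - offsetof(struct k_regs, pt.orig_ax));
        return 0; /* copy_to_user() would instead report bytes left uncopied */
    }
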
128 struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs) in save_v86_state() argument
145 set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask); in save_v86_state()
146 tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs); in save_v86_state()
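
set_flags(), used in save_v86_state() above, is a plain bit-merge macro in this file; restated stand-alone:

    /* Merge: take the masked bits from `new`, keep the rest of X. */
    #define set_flags(X, new, mask) \
        ((X) = ((X) & ~(mask)) | ((new) & (mask)))

So the call above leaves pt.flags untouched except for VIF and the per-task v86mask bits, which are taken from VEFLAGS, the task's virtual flags word, before the register image is copied back out to the user's vm86_info.
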
217 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, in SYSCALL_DEFINE1()
219 sizeof(info.regs)); in SYSCALL_DEFINE1()
262 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, in SYSCALL_DEFINE2()
264 sizeof(info.regs)); in SYSCALL_DEFINE2()
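
For context, a hedged sketch of how 32-bit user space reaches SYSCALL_DEFINE1(vm86old). This compiles only on 32-bit x86; error handling is trimmed, the register values are made-up examples, and the low-memory mapping real-mode code needs is omitted:

    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <sys/vm86.h>
    #include <unistd.h>

    int main(void)
    {
        struct vm86_struct v86;

        memset(&v86, 0, sizeof(v86));
        v86.regs.cs  = 0x0000;   /* real-mode CS:IP entry point (example) */
        v86.regs.eip = 0x0500;
        v86.regs.ss  = 0x0000;   /* real-mode stack (example) */
        v86.regs.esp = 0x1000;

        /* The guest's low 1 MiB must already be mapped (e.g. mmap at
         * address 0) before it can execute anything; omitted here. */
        long ret = syscall(SYS_vm86old, &v86);
        printf("vm86old returned %ld\n", ret);
        return 0;
    }
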
281 info->regs.pt.ds = 0; in do_sys_vm86()
282 info->regs.pt.es = 0; in do_sys_vm86()
283 info->regs.pt.fs = 0; in do_sys_vm86()
285 info->regs.pt.gs = 0; in do_sys_vm86()
293 VEFLAGS = info->regs.pt.flags; in do_sys_vm86()
294 info->regs.pt.flags &= SAFE_MASK; in do_sys_vm86()
295 info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK; in do_sys_vm86()
296 info->regs.pt.flags |= X86_VM_MASK; in do_sys_vm86()
346 :"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0)); in do_sys_vm86()
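
The do_sys_vm86() lines above zero the data segment registers and sanitize the guest's EFLAGS image before the inline asm drops into virtual-8086 mode: user-settable bits come from the guest image, privileged bits (IF, IOPL, ...) from the saved 32-bit state in regs32, and the VM bit is forced on. A stand-alone restatement; the mask values are assumptions taken from vm86 code of this era, so verify them against your tree:

    #define SAFE_MASK   0x0DD5       /* CF/PF/AF/ZF/SF/TF/DF/OF: user-settable */
    #define X86_VM_MASK 0x00020000   /* EFLAGS.VM, bit 17 */

    static unsigned long sanitize_vm86_flags(unsigned long guest_flags,
                                             unsigned long regs32_flags)
    {
        unsigned long f = guest_flags & SAFE_MASK;  /* only the safe bits */
        f |= regs32_flags & ~SAFE_MASK;  /* IF, IOPL etc. from 32-bit state */
        f |= X86_VM_MASK;                /* enter virtual-8086 mode */
        return f;
    }
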
362 static inline void set_IF(struct kernel_vm86_regs *regs) in set_IF() argument
366 return_to_32bit(regs, VM86_STI); in set_IF()
369 static inline void clear_IF(struct kernel_vm86_regs *regs) in clear_IF() argument
374 static inline void clear_TF(struct kernel_vm86_regs *regs) in clear_TF() argument
376 regs->pt.flags &= ~X86_EFLAGS_TF; in clear_TF()
379 static inline void clear_AC(struct kernel_vm86_regs *regs) in clear_AC() argument
381 regs->pt.flags &= ~X86_EFLAGS_AC; in clear_AC()
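
None of the IF helpers touch the real EFLAGS.IF: the guest's CLI/STI are virtualized through the VIF/VIP bits of the per-task VEFLAGS word, so host interrupts keep flowing while the guest believes it has masked them. A sketch of that bookkeeping, with VEFLAGS modeled as a plain variable and the return_to_32bit(regs, VM86_STI) exit reduced to a return code:

    #define X86_EFLAGS_VIF 0x00080000  /* virtual interrupt flag, bit 19 */
    #define X86_EFLAGS_VIP 0x00100000  /* virtual interrupt pending, bit 20 */

    static unsigned long veflags;      /* stand-in for the task's VEFLAGS */

    static int guest_sti(void)         /* mirrors set_IF() */
    {
        veflags |= X86_EFLAGS_VIF;
        /* 1 means: leave vm86 with VM86_STI, an interrupt was pending */
        return (veflags & X86_EFLAGS_VIP) != 0;
    }

    static void guest_cli(void)        /* mirrors clear_IF() */
    {
        veflags &= ~X86_EFLAGS_VIF;
    }
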
396 static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs) in set_vflags_long() argument
399 set_flags(regs->pt.flags, flags, SAFE_MASK); in set_vflags_long()
401 set_IF(regs); in set_vflags_long()
403 clear_IF(regs); in set_vflags_long()
406 static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs) in set_vflags_short() argument
409 set_flags(regs->pt.flags, flags, SAFE_MASK); in set_vflags_short()
411 set_IF(regs); in set_vflags_short()
413 clear_IF(regs); in set_vflags_short()
416 static inline unsigned long get_vflags(struct kernel_vm86_regs *regs) in get_vflags() argument
418 unsigned long flags = regs->pt.flags & RETURN_MASK; in get_vflags()
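
get_vflags() builds the EFLAGS image the guest is allowed to observe: the real flags filtered through RETURN_MASK, IF substituted from the virtual VIF, and IOPL reported as 3. A hedged restatement, with the mask values again assumed from this era's source:

    #define RETURN_MASK     0x0DFF      /* flag bits passed through unchanged */
    #define X86_EFLAGS_IF   0x00000200
    #define X86_EFLAGS_IOPL 0x00003000  /* both bits set: guest sees IOPL 3 */
    #define X86_EFLAGS_VIF  0x00080000

    static unsigned long get_guest_flags(unsigned long pt_flags,
                                         unsigned long veflags,
                                         unsigned long v86mask)
    {
        unsigned long flags = pt_flags & RETURN_MASK;

        if (veflags & X86_EFLAGS_VIF)
            flags |= X86_EFLAGS_IF;     /* the guest sees its virtual IF */
        flags |= X86_EFLAGS_IOPL;
        return flags | (veflags & v86mask);
    }
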
516 static void do_int(struct kernel_vm86_regs *regs, int i, in do_int() argument
522 if (regs->pt.cs == BIOSSEG) in do_int()
526 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored)) in do_int()
533 pushw(ssp, sp, get_vflags(regs), cannot_handle); in do_int()
534 pushw(ssp, sp, regs->pt.cs, cannot_handle); in do_int()
535 pushw(ssp, sp, IP(regs), cannot_handle); in do_int()
536 regs->pt.cs = segoffs >> 16; in do_int()
537 SP(regs) -= 6; in do_int()
538 IP(regs) = segoffs & 0xffff; in do_int()
539 clear_TF(regs); in do_int()
540 clear_IF(regs); in do_int()
541 clear_AC(regs); in do_int()
545 return_to_32bit(regs, VM86_INTx + (i << 8)); in do_int()
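
do_int() does in software what a real-mode INT does in hardware: push FLAGS, CS and IP (three 16-bit words, hence SP(regs) -= 6), then load CS:IP from vector i of the interrupt vector table at linear address i*4. A simplified sketch over a flat guest-memory buffer, without the fault-checked user accesses and the revectoring tests:

    #include <stdint.h>

    /* mem: the guest's low memory; the IVT (256 offset:segment pairs)
     * sits at linear address 0. x86-style unaligned stores assumed. */
    static void emulate_real_mode_int(uint8_t *mem, uint16_t *cs, uint16_t *ip,
                                      uint16_t *sp, uint16_t ss,
                                      uint16_t flags, int i)
    {
        const uint16_t *ivt = (const uint16_t *)mem;
        uint32_t segoffs = ((uint32_t)ivt[i * 2 + 1] << 16) | ivt[i * 2];
        uint8_t *ssp = mem + ((uint32_t)ss << 4);     /* stack segment base */

        *sp -= 2; *(uint16_t *)(ssp + *sp) = flags;   /* pushw FLAGS */
        *sp -= 2; *(uint16_t *)(ssp + *sp) = *cs;     /* pushw CS */
        *sp -= 2; *(uint16_t *)(ssp + *sp) = *ip;     /* pushw IP */

        *cs = segoffs >> 16;                          /* vector's segment */
        *ip = segoffs & 0xffff;                       /* vector's offset */
    }
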
548 int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno) in handle_vm86_trap() argument
559 do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs)); in handle_vm86_trap()
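
The (regs->pt.ss << 4) above is the usual real-mode linear-address computation, segment * 16 + offset; the same arithmetic produces csp and ssp in handle_vm86_fault() below:

    /* Linear address of seg:off inside the guest's low-memory image. */
    static inline unsigned long real_mode_linear(unsigned short seg,
                                                 unsigned short off)
    {
        return ((unsigned long)seg << 4) + off;
    }
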
570 void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) in handle_vm86_fault() argument
583 return_to_32bit(regs, VM86_PICRETURN); \ in handle_vm86_fault()
585 handle_vm86_trap(regs, 0, 1); \ in handle_vm86_fault()
588 orig_flags = *(unsigned short *)&regs->pt.flags; in handle_vm86_fault()
590 csp = (unsigned char __user *) (regs->pt.cs << 4); in handle_vm86_fault()
591 ssp = (unsigned char __user *) (regs->pt.ss << 4); in handle_vm86_fault()
592 sp = SP(regs); in handle_vm86_fault()
593 ip = IP(regs); in handle_vm86_fault()
618 pushl(ssp, sp, get_vflags(regs), simulate_sigsegv); in handle_vm86_fault()
619 SP(regs) -= 4; in handle_vm86_fault()
621 pushw(ssp, sp, get_vflags(regs), simulate_sigsegv); in handle_vm86_fault()
622 SP(regs) -= 2; in handle_vm86_fault()
624 IP(regs) = ip; in handle_vm86_fault()
633 SP(regs) += 4; in handle_vm86_fault()
636 SP(regs) += 2; in handle_vm86_fault()
638 IP(regs) = ip; in handle_vm86_fault()
641 set_vflags_long(newflags, regs); in handle_vm86_fault()
643 set_vflags_short(newflags, regs); in handle_vm86_fault()
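
The pushf/popf emulation above moves SP by 4 or 2 depending on whether the instruction carried a 32-bit operand-size prefix, and routes every popped flags word through set_vflags_long()/set_vflags_short() so IF stays virtual. The kernel's pushw/popw helpers store byte-by-byte through fault-checked user accesses and leave the SP(regs) adjustment to the caller, as the listing shows; a plain-memory sketch of their net effect, with the 16-bit SP wrapping inside the segment:

    #include <stdint.h>

    static void push16(uint8_t *ssp, uint16_t *sp, uint16_t val)
    {
        *sp -= 1; ssp[*sp] = val >> 8;    /* high byte at the higher address */
        *sp -= 1; ssp[*sp] = val & 0xff;
    }

    static uint16_t pop16(uint8_t *ssp, uint16_t *sp)
    {
        uint16_t lo = ssp[(*sp)++];
        uint16_t hi = ssp[(*sp)++];
        return lo | (uint16_t)(hi << 8);
    }
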
651 IP(regs) = ip; in handle_vm86_fault()
654 return_to_32bit(regs, VM86_INTx + (intno << 8)); in handle_vm86_fault()
656 do_int(regs, intno, ssp, sp); in handle_vm86_fault()
670 SP(regs) += 12; in handle_vm86_fault()
675 SP(regs) += 6; in handle_vm86_fault()
677 IP(regs) = newip; in handle_vm86_fault()
678 regs->pt.cs = newcs; in handle_vm86_fault()
681 set_vflags_long(newflags, regs); in handle_vm86_fault()
683 set_vflags_short(newflags, regs); in handle_vm86_fault()
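
iret pops in the opposite order of do_int()'s pushes: IP, then CS, then FLAGS, 6 bytes in 16-bit mode or 12 with an operand-size prefix, with the popped flags again going through set_vflags_*(). A sketch under the same flat-memory assumptions as above:

    #include <stdint.h>

    static uint16_t pop16(uint8_t *ssp, uint16_t *sp)
    {
        uint16_t lo = ssp[(*sp)++];
        uint16_t hi = ssp[(*sp)++];
        return lo | (uint16_t)(hi << 8);
    }

    /* 16-bit iret: restore IP, CS and the (virtualized) FLAGS. */
    static void emulate_iret16(uint8_t *ssp, uint16_t *sp,
                               uint16_t *cs, uint16_t *ip, uint16_t *flags)
    {
        *ip    = pop16(ssp, sp);
        *cs    = pop16(ssp, sp);
        *flags = pop16(ssp, sp);  /* caller feeds this to set_vflags_short() */
    }
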
690 IP(regs) = ip; in handle_vm86_fault()
691 clear_IF(regs); in handle_vm86_fault()
702 IP(regs) = ip; in handle_vm86_fault()
703 set_IF(regs); in handle_vm86_fault()
707 return_to_32bit(regs, VM86_UNKNOWN); in handle_vm86_fault()
723 return_to_32bit(regs, VM86_UNKNOWN); in handle_vm86_fault()
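
All the return_to_32bit() reasons in this listing (VM86_PICRETURN, VM86_STI, VM86_INTx + (intno << 8), VM86_UNKNOWN) use the vm86 UAPI encoding: the type in the low byte, the argument above it, decoded by the macros from asm/vm86.h:

    #define VM86_TYPE(retval)  ((retval) & 0xff)
    #define VM86_ARG(retval)   ((retval) >> 8)

    /* e.g. a guest INT 0x21 that the kernel does not handle in-place
     * returns VM86_INTx + (0x21 << 8), so user space sees
     * VM86_TYPE(ret) == VM86_INTx and VM86_ARG(ret) == 0x21. */
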