root/arch/x86/xen/xen-asm.S

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (ie, vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */
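/*
 * For comparison, the indirect form goes through the per-cpu xen_vcpu
 * pointer and is therefore left to C; roughly (a sketch, not the actual
 * helpers):
 *
 *      struct vcpu_info *vcpu = this_cpu_read(xen_vcpu);
 *      vcpu->evtchn_upcall_mask = 0;
 *
 * The direct forms below instead address the per-cpu xen_vcpu_info
 * structure with a single instruction.
 */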

#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/frame.h>
#include <asm/asm.h>

#include <linux/linkage.h>

/*
 * Enable events.  This clears the event mask and then tests the pending
 * event status.  If there are pending events, enter the hypervisor to
 * get them handled.
 */
ENTRY(xen_irq_enable_direct)
        FRAME_BEGIN
        /* Unmask events */
        movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

        /*
         * Preempt here doesn't matter because that will deal with any
         * pending interrupts.  The pending check may end up being run
         * on the wrong CPU, but that doesn't hurt.
         */

        /* Test for pending */
        testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
        jz 1f

        call check_events
1:
        FRAME_END
        ret
        ENDPROC(xen_irq_enable_direct)
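/*
 * Rough C equivalent of the routine above (a sketch only, ignoring the
 * preemption handling that the C variant has to worry about):
 *
 *      void xen_irq_enable(void)
 *      {
 *              struct vcpu_info *vcpu = this_cpu_read(xen_vcpu);
 *
 *              vcpu->evtchn_upcall_mask = 0;           // unmask
 *              barrier();                              // unmask before checking
 *              if (vcpu->evtchn_upcall_pending)        // anything pending?
 *                      xen_force_evtchn_callback();    // let Xen deliver it
 *      }
 */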


/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
ENTRY(xen_irq_disable_direct)
        movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
        ret
ENDPROC(xen_irq_disable_direct)
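/*
 * In C this would simply be (sketch):
 *
 *      this_cpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
 *
 * No pending check is needed here: masking can never make an event
 * deliverable.
 */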

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
        testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
        setz %ah                /* mask == 0 (events enabled) -> %ah = 1 */
        addb %ah, %ah           /* %ah = 2, i.e. bit 9 of %eax = X86_EFLAGS_IF */
        ret
        ENDPROC(xen_save_fl_direct)
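/*
 * Semantically (a sketch; the asm above avoids a branch by shifting the
 * inverted mask bit into place):
 *
 *      unsigned long xen_save_fl(void)
 *      {
 *              return this_cpu_read(xen_vcpu)->evtchn_upcall_mask ?
 *                      0 : X86_EFLAGS_IF;
 *      }
 */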


/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, the routine checks for unmasked pending events
 * and enters the hypervisor to get them delivered if so.
 */
ENTRY(xen_restore_fl_direct)
        FRAME_BEGIN
#ifdef CONFIG_X86_64
        /* The flags argument arrives in %rdi; test the IF bit (bit 9). */
        testw $X86_EFLAGS_IF, %di
#else
        /* The flags argument arrives in %eax; IF is bit 1 of %ah. */
        testb $X86_EFLAGS_IF>>8, %ah
#endif
        /* Xen's mask has the opposite sense: IF set -> mask = 0. */
        setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
        /*
         * Preempt here doesn't matter because that will deal with any
         * pending interrupts.  The pending check may end up being run
         * on the wrong CPU, but that doesn't hurt.
         */

        /*
         * Check for unmasked and pending in one 16-bit compare: the mask
         * byte follows the pending byte in struct vcpu_info, so the word
         * reads 0x0001 exactly when pending == 1 and mask == 0.
         */
        cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
        jnz 1f
        call check_events
1:
        FRAME_END
        ret
        ENDPROC(xen_restore_fl_direct)
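/*
 * Rough C equivalent (a sketch only; as above, the real C variant also
 * deals with preemption):
 *
 *      void xen_restore_fl(unsigned long flags)
 *      {
 *              struct vcpu_info *vcpu = this_cpu_read(xen_vcpu);
 *
 *              vcpu->evtchn_upcall_mask = !(flags & X86_EFLAGS_IF);
 *              barrier();
 *              if (!vcpu->evtchn_upcall_mask && vcpu->evtchn_upcall_pending)
 *                      xen_force_evtchn_callback();
 *      }
 */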


/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
ENTRY(check_events)
        FRAME_BEGIN
#ifdef CONFIG_X86_32
        push %eax
        push %ecx
        push %edx
        call xen_force_evtchn_callback
        pop %edx
        pop %ecx
        pop %eax
#else
        push %rax
        push %rcx
        push %rdx
        push %rsi
        push %rdi
        push %r8
        push %r9
        push %r10
        push %r11
        call xen_force_evtchn_callback
        pop %r11
        pop %r10
        pop %r9
        pop %r8
        pop %rdi
        pop %rsi
        pop %rdx
        pop %rcx
        pop %rax
#endif
        FRAME_END
        ret
ENDPROC(check_events)
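/*
 * The registers saved above are exactly the ones the C calling convention
 * allows xen_force_evtchn_callback() to clobber.  That helper is trivial;
 * roughly (a sketch, assuming the HYPERVISOR_xen_version hypercall wrapper):
 *
 *      void xen_force_evtchn_callback(void)
 *      {
 *              (void)HYPERVISOR_xen_version(0, NULL);
 *      }
 *
 * Any harmless hypercall will do: on return from the hypervisor, pending
 * events are re-checked and delivered via the upcall.
 */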

/*
 * Read the page-fault address (%cr2) that Xen has stored in the
 * vcpu_info structure, going through the per-cpu xen_vcpu pointer
 * (the indirect form).
 */
ENTRY(xen_read_cr2)
        FRAME_BEGIN
        _ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
        _ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
        FRAME_END
        ret
        ENDPROC(xen_read_cr2);

/*
 * Same, but reading from the per-cpu xen_vcpu_info structure directly
 * (the direct form), so no pointer needs to be loaded first.
 */
ENTRY(xen_read_cr2_direct)
        FRAME_BEGIN
        _ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
        FRAME_END
        ret
        ENDPROC(xen_read_cr2_direct);
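/*
 * In C the two would look roughly like this (a sketch; field names follow
 * struct vcpu_info in xen/interface/xen.h):
 *
 *      unsigned long xen_read_cr2(void)
 *      {
 *              return this_cpu_read(xen_vcpu)->arch.cr2;        // indirect
 *      }
 *
 *      unsigned long xen_read_cr2_direct(void)
 *      {
 *              return this_cpu_read(xen_vcpu_info.arch.cr2);    // direct
 *      }
 */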
