arch/x86/xen/xen-asm_32.S

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (ie, vcpu in pda) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/thread_info.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/asm.h>

#include <xen/interface/xen.h>

#include <linux/linkage.h>

/* Pseudo-flag used for virtual NMI, which we don't implement yet */
#define XEN_EFLAGS_NMI  0x80000000

/*
 * This is run where a normal iret would be run, with the same stack setup:
 *      8: eflags
 *      4: cs
 *      esp-> 0: eip
 *
 * This attempts to make sure that any pending events are dealt with
 * on return to usermode, but there is a small window in which an
 * event can happen just before entering usermode.  If the nested
 * interrupt ends up setting one of the TIF_WORK_MASK pending work
 * flags, it will not be tested again before returning to
 * usermode. This means that a process can end up with pending work,
 * which will be unprocessed until the process enters and leaves the
 * kernel again, which could be an unbounded amount of time.  This
 * means that a pending signal or reschedule event could be
 * indefinitely delayed.
 *
 * The fix is to notice a nested interrupt in the critical window, and
 * if one occurs, then fold the nested interrupt into the current
 * interrupt stack frame, and re-process it iteratively rather than
 * recursively.  This means that it will exit via the normal path, and
 * all pending work will be dealt with appropriately.
 *
 * Because the nested interrupt handler needs to deal with the current
 * stack state in whatever form it's in, we keep things simple by only
 * using a single register which is pushed/popped on the stack.
 */

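/*
 * Pop the saved %fs selector.  If the selector faults on load, the
 * exception fixup below overwrites the saved value on the stack with
 * the null selector and retries, so a bad %fs simply ends up as 0.
 */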
.macro POP_FS
1:
        popw %fs
.pushsection .fixup, "ax"
2:      movw $0, (%esp)
        jmp 1b
.popsection
        _ASM_EXTABLE(1b,2b)
.endm

ENTRY(xen_iret)
        /* test eflags for special cases */
        testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp)
        jnz hyper_iret

        push %eax
        ESP_OFFSET=4    # bytes pushed onto stack

        /* Store vcpu_info pointer for easy access */
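        /*
         * On SMP, xen_vcpu is a per-cpu variable reached through the
         * %fs-based __KERNEL_PERCPU segment.  The user's %fs may
         * already have been restored on this exit path, so load the
         * kernel per-cpu selector temporarily, fetch the pointer, and
         * then put %fs back via POP_FS (which tolerates a selector
         * that faults on load).
         */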
#ifdef CONFIG_SMP
        pushw %fs
        movl $(__KERNEL_PERCPU), %eax
        movl %eax, %fs
        movl %fs:xen_vcpu, %eax
        POP_FS
#else
        movl %ss:xen_vcpu, %eax
#endif

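        /*
         * X86_EFLAGS_IF is bit 9, so (X86_EFLAGS_IF >> 8) picks it out
         * of the second byte of the saved eflags: 8 for eflags' place
         * in the original frame, +ESP_OFFSET for the pushed %eax, +1
         * for the byte containing IF.  The resulting ZF feeds the setz
         * below: IF clear -> mask events (1), IF set -> unmask (0).
         */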
        /* check IF state we're restoring */
        testb $X86_EFLAGS_IF>>8, 8+1+ESP_OFFSET(%esp)

        /*
         * Maybe enable events.  Once this happens we could get a
         * recursive event, so the critical region starts immediately
         * afterwards.  However, if that happens we don't end up
         * resuming the code, so we don't have to be worried about
         * being preempted to another CPU.
         */
        setz %ss:XEN_vcpu_info_mask(%eax)
xen_iret_start_crit:

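        /*
         * The 16-bit compare below covers both the pending byte and
         * the mask byte next to it: it is equal to 0x0001 only when an
         * event is pending and events are not masked.
         */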
        /* check for unmasked and pending */
        cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax)

        /*
         * If there's something pending, mask events again so we can
         * jump back into xen_hypervisor_callback. Otherwise do not
         * touch XEN_vcpu_info_mask.
         */
        jne 1f
        movb $1, %ss:XEN_vcpu_info_mask(%eax)

1:      popl %eax

        /*
         * From this point on the registers are restored and the stack
         * updated, so we don't need to worry about it if we're
         * preempted.
         */
iret_restore_end:

        /*
         * Jump to hypervisor_callback after fixing up the stack.
         * Events are masked, so jumping out of the critical region is
         * OK.
         */
        je xen_hypervisor_callback

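        /*
         * If the iret itself faults (e.g. on bad user segment state),
         * the exception table entry below sends the fault to the
         * iret_exc fixup in the 32-bit entry code.
         */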
1:      iret
xen_iret_end_crit:
        _ASM_EXTABLE(1b, iret_exc)

hyper_iret:
        /* put this out of line since it's very rarely used */
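        /*
         * Each stub in the hypercall page is 32 bytes wide, so this
         * lands directly on the __HYPERVISOR_iret entry.
         */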
        jmp hypercall_page + __HYPERVISOR_iret * 32

        .globl xen_iret_start_crit, xen_iret_end_crit

/*
 * This is called by xen_hypervisor_callback in entry_32.S when it sees
 * that the EIP at the time of interrupt was between
 * xen_iret_start_crit and xen_iret_end_crit.
 *
 * The stack format at this point is:
 *      ----------------
 *       ss             : (ss/esp may be present if we came from usermode)
 *       esp            :
 *       eflags         }  outer exception info
 *       cs             }
 *       eip            }
 *      ----------------
 *       eax            :  outer eax if it hasn't been restored
 *      ----------------
 *       eflags         }
 *       cs             }  nested exception info
 *       eip            }
 *       return address : (into xen_hypervisor_callback)
 *
 * In order to deliver the nested exception properly, we need to discard the
 * nested exception frame such that when we handle the exception, we do it
 * in the context of the outer exception rather than starting a new one.
 *
 * The only caveat is that if the outer eax hasn't been restored yet (i.e.
 * it's still on stack), we need to restore its value here.
 */
ENTRY(xen_iret_crit_fixup)
        /*
         * Paranoia: Make sure we're really coming from kernel space.
         * One could imagine a case where userspace jumps into the
         * critical range address, but just before the CPU delivers a
         * PF, it decides to deliver an interrupt instead.  Unlikely?
         * Definitely.  Easy to avoid?  Yes.
         */
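        /*
         * Bit 1 of the saved CS is the telling RPL bit: clear for the
         * ring-1 segment the 32-bit PV kernel runs in, set for the
         * ring-3 user segments.
         */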
        testb $2, 2*4(%esp)             /* nested CS */
        jnz 2f

        /*
         * If eip is before iret_restore_end then stack
         * hasn't been restored yet.
         */
        cmpl $iret_restore_end, 1*4(%esp)
        jae 1f

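        /*
         * ret $N pops the return address (taking us back into
         * xen_hypervisor_callback) and then drops N more bytes from
         * the stack, which is what discards the nested frame.
         */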
        movl 4*4(%esp), %eax            /* load outer EAX */
        ret $4*4                        /* discard nested EIP, CS, and EFLAGS as
                                         * well as the just restored EAX */

1:
        ret $3*4                        /* discard nested EIP, CS, and EFLAGS */

2:
        ret
END(xen_iret_crit_fixup)
