root/arch/x86/kernel/paravirt_patch.c


DEFINITIONS

This source file includes the following definitions:
  1. paravirt_patch_ident_64
  2. native_patch

// SPDX-License-Identifier: GPL-2.0
#include <linux/stringify.h>

#include <asm/paravirt.h>
#include <asm/asm-offsets.h>

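/*
 * Helpers for locating one native instruction sequence inside the
 * patch_data_* tables below: PSTART/PEND yield its start and end, and
 * PATCH copies it over a pv call site via paravirt_patch_insns().
 */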
#define PSTART(d, m)						\
	patch_data_##d.m

#define PEND(d, m)						\
	(PSTART(d, m) + sizeof(patch_data_##d.m))

#define PATCH(d, m, insn_buff, len)				\
	paravirt_patch_insns(insn_buff, len, PSTART(d, m), PEND(d, m))

#define PATCH_CASE(ops, m, data, insn_buff, len)		\
	case PARAVIRT_PATCH(ops.m):				\
		return PATCH(data, ops##_##m, insn_buff, len)

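/*
 * For reference, PATCH_CASE(irq, save_fl, xxl, insn_buff, len) expands
 * (via PATCH, PSTART and PEND) to:
 *
 *	case PARAVIRT_PATCH(irq.save_fl):
 *		return paravirt_patch_insns(insn_buff, len,
 *				patch_data_xxl.irq_save_fl,
 *				patch_data_xxl.irq_save_fl +
 *					sizeof(patch_data_xxl.irq_save_fl));
 */
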
#ifdef CONFIG_PARAVIRT_XXL
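/*
 * Native instruction templates for the PARAVIRT_XXL ops. Each array is
 * exactly as long as the machine code it holds, so sizeof() gives the
 * patch length.
 */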
struct patch_xxl {
	const unsigned char	irq_irq_disable[1];
	const unsigned char	irq_irq_enable[1];
	const unsigned char	irq_save_fl[2];
	const unsigned char	mmu_read_cr2[3];
	const unsigned char	mmu_read_cr3[3];
	const unsigned char	mmu_write_cr3[3];
	const unsigned char	irq_restore_fl[2];
# ifdef CONFIG_X86_64
	const unsigned char	cpu_wbinvd[2];
	const unsigned char	cpu_usergs_sysret64[6];
	const unsigned char	cpu_swapgs[3];
	const unsigned char	mov64[3];
# else
	const unsigned char	cpu_iret[1];
# endif
};

static const struct patch_xxl patch_data_xxl = {
	.irq_irq_disable	= { 0xfa },		// cli
	.irq_irq_enable		= { 0xfb },		// sti
	.irq_save_fl		= { 0x9c, 0x58 },	// pushf; pop %[re]ax
	.mmu_read_cr2		= { 0x0f, 0x20, 0xd0 },	// mov %cr2, %[re]ax
	.mmu_read_cr3		= { 0x0f, 0x20, 0xd8 },	// mov %cr3, %[re]ax
# ifdef CONFIG_X86_64
	.mmu_write_cr3		= { 0x0f, 0x22, 0xdf },	// mov %rdi, %cr3
	.irq_restore_fl		= { 0x57, 0x9d },	// push %rdi; popfq
	.cpu_wbinvd		= { 0x0f, 0x09 },	// wbinvd
	.cpu_usergs_sysret64	= { 0x0f, 0x01, 0xf8,
				    0x48, 0x0f, 0x07 },	// swapgs; sysretq
	.cpu_swapgs		= { 0x0f, 0x01, 0xf8 },	// swapgs
	.mov64			= { 0x48, 0x89, 0xf8 },	// mov %rdi, %rax
# else
	.mmu_write_cr3		= { 0x0f, 0x22, 0xd8 },	// mov %eax, %cr3
	.irq_restore_fl		= { 0x50, 0x9d },	// push %eax; popf
	.cpu_iret		= { 0xcf },		// iret
# endif
};

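/*
 * A pv op that is the 64-bit identity function is patched to a plain
 * "mov %rdi, %rax" on x86-64. On 32-bit nothing is emitted (0 bytes
 * patched) and the call to the C identity helper stays in place.
 */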
unsigned int paravirt_patch_ident_64(void *insn_buff, unsigned int len)
{
#ifdef CONFIG_X86_64
	return PATCH(xxl, mov64, insn_buff, len);
#endif
	return 0;
}
#endif /* CONFIG_PARAVIRT_XXL */

#ifdef CONFIG_PARAVIRT_SPINLOCKS
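/* Native instruction templates for the paravirt spinlock ops. */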
struct patch_lock {
	unsigned char queued_spin_unlock[3];
	unsigned char vcpu_is_preempted[2];
};

static const struct patch_lock patch_data_lock = {
	.vcpu_is_preempted	= { 0x31, 0xc0 },	// xor %eax, %eax

# ifdef CONFIG_X86_64
	.queued_spin_unlock	= { 0xc6, 0x07, 0x00 },	// movb $0, (%rdi)
# else
	.queued_spin_unlock	= { 0xc6, 0x00, 0x00 },	// movb $0, (%eax)
# endif
};
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

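/*
 * Patch a single paravirt call site: replace the indirect call of type
 * @type with the matching native instruction sequence from the tables
 * above. Types without an inline replacement here are handed on to
 * paravirt_patch_default().
 */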
unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
			  unsigned int len)
{
	switch (type) {

#ifdef CONFIG_PARAVIRT_XXL
	PATCH_CASE(irq, restore_fl, xxl, insn_buff, len);
	PATCH_CASE(irq, save_fl, xxl, insn_buff, len);
	PATCH_CASE(irq, irq_enable, xxl, insn_buff, len);
	PATCH_CASE(irq, irq_disable, xxl, insn_buff, len);

	PATCH_CASE(mmu, read_cr2, xxl, insn_buff, len);
	PATCH_CASE(mmu, read_cr3, xxl, insn_buff, len);
	PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len);

# ifdef CONFIG_X86_64
	PATCH_CASE(cpu, usergs_sysret64, xxl, insn_buff, len);
	PATCH_CASE(cpu, swapgs, xxl, insn_buff, len);
	PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len);
# else
	PATCH_CASE(cpu, iret, xxl, insn_buff, len);
# endif
#endif

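	/*
	 * Lock ops are patched to the inline native sequences only while
	 * the kernel really uses the native implementations; a pv-aware
	 * hypervisor may have pointed these ops elsewhere.
	 */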
#ifdef CONFIG_PARAVIRT_SPINLOCKS
	case PARAVIRT_PATCH(lock.queued_spin_unlock):
		if (pv_is_native_spin_unlock())
			return PATCH(lock, queued_spin_unlock, insn_buff, len);
		break;

	case PARAVIRT_PATCH(lock.vcpu_is_preempted):
		if (pv_is_native_vcpu_is_preempted())
			return PATCH(lock, vcpu_is_preempted, insn_buff, len);
		break;
#endif
	default:
		break;
	}

	return paravirt_patch_default(type, insn_buff, addr, len);
}
