arch/arm64/kvm/hyp/tlb.c


DEFINITIONS

This source file includes the following definitions:
  1. __tlb_switch_to_guest_vhe
  2. __tlb_switch_to_guest_nvhe
  3. __tlb_switch_to_guest
  4. __tlb_switch_to_host_vhe
  5. __tlb_switch_to_host_nvhe
  6. __tlb_switch_to_host
  7. __kvm_tlb_flush_vmid_ipa
  8. __kvm_tlb_flush_vmid
  9. __kvm_tlb_flush_local_vmid
  10. __kvm_flush_vm_context

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/irqflags.h>

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

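/*
 * Context saved and restored around a TLB invalidation: the host's
 * interrupt flags, plus the TCR_EL1/SCTLR_EL1 values stashed by the
 * erratum 1165522 workaround (VHE only).
 */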
struct tlb_inv_context {
	unsigned long	flags;
	u64		tcr;
	u64		sctlr;
};

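/*
 * VHE: rewire HCR_EL2 and the stage-2 context so that the EL1 TLB
 * instructions issued by the callers below operate on the guest's
 * translation regime. Interrupts stay disabled until
 * __tlb_switch_to_host_vhe() restores the host state.
 */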
static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
						 struct tlb_inv_context *cxt)
{
	u64 val;

	local_irq_save(cxt->flags);

	if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) {
		/*
		 * For CPUs that are affected by ARM erratum 1165522, we
		 * cannot trust stage-1 to be in a correct state at this
		 * point. Since we do not want to force a full load of the
		 * vcpu state, we prevent the EL1 page-table walker from
		 * allocating new TLB entries. This is done by setting the
		 * EPD bits in the TCR_EL1 register. We also need to
		 * prevent it from allocating IPA->PA walks, so we enable
		 * the S1 MMU...
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
		val |= SCTLR_ELx_M;
		write_sysreg_el1(val, SYS_SCTLR);
	}

	/*
	 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
	 * most TLB operations target EL2/EL0. In order to affect the
	 * guest TLBs (EL1/EL0), we need to change one of these two
	 * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
	 * let's flip TGE before executing the TLB operation.
	 *
	 * ARM erratum 1165522 requires some special handling (again),
	 * as we need to make sure both stages of translation are in
	 * place before clearing TGE. __load_guest_stage2() already
	 * has an ISB in order to deal with this.
	 */
	__load_guest_stage2(kvm);
	val = read_sysreg(hcr_el2);
	val &= ~HCR_TGE;
	write_sysreg(val, hcr_el2);
	isb();
}

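/*
 * !VHE: EL1 TLB instructions executed at EL2 already target the
 * guest's EL1&0 regime, so installing the VMID via VTTBR_EL2 is all
 * the context switch that is needed.
 */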
static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
						  struct tlb_inv_context *cxt)
{
	__load_guest_stage2(kvm);
	isb();
}

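/* Set up the translation context for the requested VMID. */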
static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm,
					     struct tlb_inv_context *cxt)
{
	if (has_vhe())
		__tlb_switch_to_guest_vhe(kvm, cxt);
	else
		__tlb_switch_to_guest_nvhe(kvm, cxt);
}

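/*
 * VHE: undo __tlb_switch_to_guest_vhe(): clear VTTBR_EL2, restore the
 * host's view of HCR_EL2, revert the erratum 1165522 workaround and
 * re-enable interrupts.
 */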
static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
						struct tlb_inv_context *cxt)
{
	/*
	 * We're done with the TLB operation, let's restore the host's
	 * view of HCR_EL2.
	 */
	write_sysreg(0, vttbr_el2);
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	isb();

	if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) {
		/* Restore the registers to what they were */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
		write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
	}

	local_irq_restore(cxt->flags);
}

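/* !VHE: clearing VTTBR_EL2 is enough to return to the host context. */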
static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
						 struct tlb_inv_context *cxt)
{
	write_sysreg(0, vttbr_el2);
}

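/* Tear down the context installed by __tlb_switch_to_guest(). */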
static void __hyp_text __tlb_switch_to_host(struct kvm *kvm,
					    struct tlb_inv_context *cxt)
{
	if (has_vhe())
		__tlb_switch_to_host_vhe(kvm, cxt);
	else
		__tlb_switch_to_host_nvhe(kvm, cxt);
}

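/*
 * Invalidate any TLB entry that could translate @ipa under the VMID
 * of @kvm, Inner Shareable (i.e. on all CPUs).
 */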
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	kvm = kern_hyp_va(kvm);
	__tlb_switch_to_guest(kvm, &cxt);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
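	/*
	 * TLBI IPAS2E1IS takes the IPA right-shifted by 12 bits,
	 * regardless of the page size in use.
	 */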
	ipa >>= 12;
	__tlbi(ipas2e1is, ipa);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	/*
	 * If the host is running at EL1 and we have a VPIPT I-cache,
	 * then we must perform I-cache maintenance at EL2 in order for
	 * it to have an effect on the guest. Since the guest cannot hit
	 * I-cache lines allocated with a different VMID, we don't need
	 * to worry about junk out of guest reset (we nuke the I-cache on
	 * VMID rollover), but we do need to be careful when remapping
	 * executable pages for the same guest. This can happen when KSM
	 * takes a CoW fault on an executable page, copies the page into
	 * a page that was previously mapped in the guest and then needs
	 * to invalidate the guest view of the I-cache for that page
	 * from EL1. To solve this, we invalidate the entire I-cache when
	 * unmapping a page from a guest if we have a VPIPT I-cache but
	 * the host is running at EL1. As above, we could do better if
	 * we had the VA.
	 *
	 * The moral of this story is: if you have a VPIPT I-cache, then
	 * you should be running with VHE enabled.
	 */
	if (!has_vhe() && icache_is_vpipt())
		__flush_icache_all();

	__tlb_switch_to_host(kvm, &cxt);
}

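/*
 * Invalidate all combined Stage-1/Stage-2 TLB entries for the VMID of
 * @kvm, Inner Shareable.
 */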
void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	kvm = kern_hyp_va(kvm);
	__tlb_switch_to_guest(kvm, &cxt);

	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(kvm, &cxt);
}

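/*
 * Invalidate the EL1&0 regime TLB entries for the vcpu's VMID on the
 * local CPU only, hence the non-shareable dsb(nsh).
 */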
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(kvm, &cxt);

	__tlbi(vmalle1);
	dsb(nsh);
	isb();

	__tlb_switch_to_host(kvm, &cxt);
}

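/*
 * Invalidate TLB entries for all VMIDs on all CPUs, used when the
 * VMID generation rolls over.
 */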
void __hyp_text __kvm_flush_vm_context(void)
{
	dsb(ishst);
	__tlbi(alle1is);

	/*
	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
	 * is necessary across a VMID rollover.
	 *
	 * VPIPT caches constrain lookup and maintenance to the active VMID,
	 * so we need to invalidate lines with a stale VMID to avoid an ABA
	 * race after multiple rollovers.
	 */
	if (icache_is_vpipt())
		asm volatile("ic ialluis");

	dsb(ish);
}
