/linux-4.1.27/arch/cris/arch-v32/mm/ |
D | init.c | 50 mmu_config = ( REG_STATE(mmu, rw_mm_cfg, we, on) | in cris_mmu_init() 51 REG_STATE(mmu, rw_mm_cfg, acc, on) | in cris_mmu_init() 52 REG_STATE(mmu, rw_mm_cfg, ex, on) | in cris_mmu_init() 53 REG_STATE(mmu, rw_mm_cfg, inv, on) | in cris_mmu_init() 55 REG_STATE(mmu, rw_mm_cfg, seg_f, page) | in cris_mmu_init() 56 REG_STATE(mmu, rw_mm_cfg, seg_e, page) | in cris_mmu_init() 57 REG_STATE(mmu, rw_mm_cfg, seg_d, linear) | in cris_mmu_init() 59 REG_STATE(mmu, rw_mm_cfg, seg_f, linear) | in cris_mmu_init() 60 REG_STATE(mmu, rw_mm_cfg, seg_e, linear) | in cris_mmu_init() 61 REG_STATE(mmu, rw_mm_cfg, seg_d, page) | in cris_mmu_init() [all …]
|
D | tlb.c | 19 tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, val); \ 48 int mmu; in __flush_tlb_all() local 59 for (mmu = 1; mmu <= 2; mmu++) { in __flush_tlb_all() 60 SUPP_BANK_SEL(mmu); /* Select the MMU */ in __flush_tlb_all() 63 mmu_tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, i); in __flush_tlb_all() 65 mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid, INVALID_PAGEID) in __flush_tlb_all() 66 | REG_FIELD(mmu, rw_mm_tlb_hi, vpn, i & 0xf)); in __flush_tlb_all() 82 int mmu; in __flush_tlb_mm() local 96 for (mmu = 1; mmu <= 2; mmu++) { in __flush_tlb_mm() 97 SUPP_BANK_SEL(mmu); in __flush_tlb_mm() [all …]
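The __flush_tlb_all() excerpt above walks both MMU banks and stuffs an invalid PID into every TLB index so no stale translation can match. A minimal user-space model of that loop is sketched below; the bank/entry counts, the toy_* names and TOY_INVALID_PID are assumptions for illustration, not the CRIS values.

    /*
     * Toy of the flush-all loop quoted from __flush_tlb_all() above: walk
     * every TLB index in every MMU bank and overwrite the entry with an
     * invalid PID so no translation can match.  Sizes and the invalid PID
     * are invented for the sketch.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define TOY_BANKS       2
    #define TOY_TLB_ENTRIES 64
    #define TOY_INVALID_PID 63u    /* a PID no real process is given (assumed) */

    struct toy_tlb_entry {
        uint32_t pid;
        uint32_t vpn;
    };

    static struct toy_tlb_entry toy_tlb[TOY_BANKS][TOY_TLB_ENTRIES];

    static void toy_flush_tlb_all(void)
    {
        int bank, i;

        for (bank = 0; bank < TOY_BANKS; bank++) {      /* select each MMU  */
            for (i = 0; i < TOY_TLB_ENTRIES; i++) {     /* every TLB index  */
                toy_tlb[bank][i].pid = TOY_INVALID_PID;
                toy_tlb[bank][i].vpn = i & 0xf;         /* vpn pattern mirrors the excerpt */
            }
        }
    }

    int main(void)
    {
        toy_flush_tlb_all();
        printf("entry[0][3].pid = %u\n", toy_tlb[0][3].pid);
        return 0;
    }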
|
D | mmu.S | 39 .macro MMU_BUS_FAULT_HANDLER handler, mmu, we, ex 44 move \mmu, $srs ; Select MMU support register bank 82 .macro MMU_REFILL_HANDLER handler, mmu 95 move \mmu, $srs ; Select MMU support register bank 185 move \mmu, $srs
|
D | Makefile | 3 obj-y += mmu.o init.o tlb.o intmem.o
|
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ |
D | base.c | 33 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_map_at() local 35 int big = vma->node->type != mmu->spg_shift; in nvkm_vm_map_at() 38 u32 pde = (offset >> mmu->pgt_bits) - vm->fpde; in nvkm_vm_map_at() 39 u32 pte = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits; in nvkm_vm_map_at() 40 u32 max = 1 << (mmu->pgt_bits - bits); in nvkm_vm_map_at() 56 mmu->map(vma, pgt, node, pte, len, phys, delta); in nvkm_vm_map_at() 70 mmu->flush(vm); in nvkm_vm_map_at() 78 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_map_sg_table() local 79 int big = vma->node->type != mmu->spg_shift; in nvkm_vm_map_sg_table() 83 u32 pde = (offset >> mmu->pgt_bits) - vm->fpde; in nvkm_vm_map_sg_table() [all …]
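The pde/pte arithmetic in the nvkm_vm_map_at() excerpt is a two-level split of a VM offset: the bits above pgt_bits pick the page table, and the remaining bits divided by the page size pick the entry inside it. Below is a minimal user-space model of that split; the toy_* names and the concrete shift values are assumptions, not nouveau's real configuration.

    /*
     * Simplified model of the index arithmetic visible in nvkm_vm_map_at()
     * above.  Names and shift values are illustrative only.
     */
    #include <stdint.h>
    #include <stdio.h>

    struct toy_vm {
        uint32_t pgt_bits;  /* offset bits spanned by one page table      */
        uint32_t page_bits; /* log2 of the page size used by this mapping */
        uint32_t fpde;      /* index of the first page-directory entry    */
    };

    static void toy_split(const struct toy_vm *vm, uint64_t offset,
                          uint32_t *pde, uint32_t *pte)
    {
        /* Which page table covers this offset (relative to the first one). */
        *pde = (uint32_t)(offset >> vm->pgt_bits) - vm->fpde;
        /* Which entry inside that page table maps this page. */
        *pte = (uint32_t)(offset & ((1ull << vm->pgt_bits) - 1)) >> vm->page_bits;
    }

    int main(void)
    {
        struct toy_vm vm = { .pgt_bits = 27, .page_bits = 12, .fpde = 0 };
        uint32_t pde, pte;

        toy_split(&vm, 0x12345000, &pde, &pte);
        printf("pde=%u pte=%u entries-per-table=%u\n", pde, pte,
               1u << (vm.pgt_bits - vm.page_bits));
        return 0;
    }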
|
D | Kbuild | 1 nvkm-y += nvkm/subdev/mmu/base.o 2 nvkm-y += nvkm/subdev/mmu/nv04.o 3 nvkm-y += nvkm/subdev/mmu/nv41.o 4 nvkm-y += nvkm/subdev/mmu/nv44.o 5 nvkm-y += nvkm/subdev/mmu/nv50.o 6 nvkm-y += nvkm/subdev/mmu/gf100.o
|
D | nv50.c | 87 if (nvkm_fb(vma->vm->mmu)->ram->stolen) { in nv50_vm_map() 88 phys += nvkm_fb(vma->vm->mmu)->ram->stolen; in nv50_vm_map() 152 struct nv50_mmu_priv *priv = (void *)vm->mmu; in nv50_vm_flush() 195 nv50_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, in nv50_vm_create() argument 198 u32 block = (1 << (mmu->pgt_bits + 12)); in nv50_vm_create() 202 return nvkm_vm_create(mmu, offset, length, mm_offset, block, pvm); in nv50_vm_create()
|
D | gf100.c | 115 struct nvkm_ltc *ltc = nvkm_ltc(vma->vm->mmu); in gf100_vm_map() 161 struct gf100_mmu_priv *priv = (void *)vm->mmu; in gf100_vm_flush() 195 gf100_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset, in gf100_vm_create() argument 198 return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, pvm); in gf100_vm_create()
|
D | nv44.c | 88 struct nv04_mmu_priv *priv = (void *)vma->vm->mmu; in nv44_vm_map_sg() 143 struct nv04_mmu_priv *priv = (void *)vm->mmu; in nv44_vm_flush()
|
D | nv04.c | 73 nv04_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mmstart, in nv04_vm_create() argument
|
D | nv41.c | 68 struct nv04_mmu_priv *priv = (void *)vm->mmu; in nv41_vm_flush()
|
/linux-4.1.27/drivers/iommu/ |
D | ipmmu-vmsa.c | 40 struct ipmmu_vmsa_device *mmu; member 51 struct ipmmu_vmsa_device *mmu; member 188 static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset) in ipmmu_read() argument 190 return ioread32(mmu->base + offset); in ipmmu_read() 193 static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset, in ipmmu_write() argument 196 iowrite32(data, mmu->base + offset); in ipmmu_write() 201 return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg); in ipmmu_ctx_read() 207 ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data); in ipmmu_ctx_write() 222 dev_err_ratelimited(domain->mmu->dev, in ipmmu_tlb_sync() 247 struct ipmmu_vmsa_device *mmu = domain->mmu; in ipmmu_utlb_enable() local [all …]
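ipmmu_ctx_read()/ipmmu_ctx_write() in the excerpt above reach a context's registers by adding context_id * IM_CTX_SIZE to the register offset, so every translation context sees an identical window at a fixed stride. A simplified model of that banked-accessor pattern follows; the window size, register offset and toy_* names are invented for the sketch, and a plain array stands in for the ioremap()ed MMIO region.

    /*
     * Minimal model of the per-context register accessors shown above:
     * each context owns a fixed-size window at context_id * CTX_SIZE from
     * the device base.  The array stands in for the MMIO region; CTX_SIZE
     * and REG_STATUS are made-up values, not the IPMMU's real layout.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define CTX_SIZE   0x40u   /* bytes of register space per context (assumed) */
    #define REG_STATUS 0x08u   /* example register offset inside a context      */

    struct toy_ipmmu {
        uint32_t regs[1024];   /* stand-in for the mapped register window */
    };

    static uint32_t toy_read(struct toy_ipmmu *mmu, uint32_t offset)
    {
        return mmu->regs[offset / 4];
    }

    static void toy_write(struct toy_ipmmu *mmu, uint32_t offset, uint32_t data)
    {
        mmu->regs[offset / 4] = data;
    }

    /* Context accessors just add the context's window offset, as in the driver. */
    static uint32_t toy_ctx_read(struct toy_ipmmu *mmu, unsigned int ctx,
                                 uint32_t reg)
    {
        return toy_read(mmu, ctx * CTX_SIZE + reg);
    }

    static void toy_ctx_write(struct toy_ipmmu *mmu, unsigned int ctx,
                              uint32_t reg, uint32_t data)
    {
        toy_write(mmu, ctx * CTX_SIZE + reg, data);
    }

    int main(void)
    {
        struct toy_ipmmu mmu = { { 0 } };

        toy_ctx_write(&mmu, 2, REG_STATUS, 0xdeadbeef);
        printf("ctx2 status = %#x\n", toy_ctx_read(&mmu, 2, REG_STATUS));
        return 0;
    }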
|
/linux-4.1.27/arch/cris/arch-v32/kernel/ |
D | head.S | 62 move.d REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 8) \ 63 | REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 4) \ 64 | REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 5) \ 65 | REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb), $r0 67 move.d REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 8) \ 68 | REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 4) \ 69 | REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb), $r0 73 move.d REG_FIELD(mmu, rw_mm_kbase_lo, base_4, 4) \ 74 | REG_FIELD(mmu, rw_mm_kbase_lo, base_0, 0), $r1 90 move.d REG_STATE(mmu, rw_mm_cfg, we, on) \ [all …]
|
/linux-4.1.27/arch/arc/mm/ |
D | tlb.c | 221 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; in local_flush_tlb_all() local 229 for (entry = 0; entry < mmu->num_tlb; entry++) { in local_flush_tlb_all() 565 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; in read_decode_mmu_bcr() local 586 mmu->ver = (tmp >> 24); in read_decode_mmu_bcr() 588 if (mmu->ver <= 2) { in read_decode_mmu_bcr() 590 mmu->pg_sz = PAGE_SIZE; in read_decode_mmu_bcr() 591 mmu->sets = 1 << mmu2->sets; in read_decode_mmu_bcr() 592 mmu->ways = 1 << mmu2->ways; in read_decode_mmu_bcr() 593 mmu->u_dtlb = mmu2->u_dtlb; in read_decode_mmu_bcr() 594 mmu->u_itlb = mmu2->u_itlb; in read_decode_mmu_bcr() [all …]
|
D | tlbex.S | 218 ; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu
|
/linux-4.1.27/drivers/gpu/drm/msm/ |
D | msm_mmu.h | 24 int (*attach)(struct msm_mmu *mmu, const char **names, int cnt); 25 void (*detach)(struct msm_mmu *mmu, const char **names, int cnt); 26 int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt, 28 int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt, 30 void (*destroy)(struct msm_mmu *mmu); 38 static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, in msm_mmu_init() argument 41 mmu->dev = dev; in msm_mmu_init() 42 mmu->funcs = funcs; in msm_mmu_init()
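msm_mmu.h defines the MMU backend interface as a table of function pointers (attach/detach/map/unmap/destroy) that msm_mmu_init() ties to the owning device; callers such as msm_gem.c then go through mmu->funcs-> without knowing which backend is behind it. The sketch below shows the same vtable pattern with a dummy backend; the toy_* names and the dummy implementation are illustrative only, not one of the real msm backends.

    /*
     * Sketch of the ops-table pattern in msm_mmu.h: a backend fills a
     * struct of function pointers and an init helper stores them together
     * with the owning device.  The dummy backend is illustrative only.
     */
    #include <stdio.h>

    struct toy_mmu;

    struct toy_mmu_funcs {
        int  (*attach)(struct toy_mmu *mmu, const char **names, int cnt);
        void (*detach)(struct toy_mmu *mmu, const char **names, int cnt);
        void (*destroy)(struct toy_mmu *mmu);
    };

    struct toy_mmu {
        const struct toy_mmu_funcs *funcs;
        void *dev;                       /* owning device, opaque here */
    };

    static void toy_mmu_init(struct toy_mmu *mmu, void *dev,
                             const struct toy_mmu_funcs *funcs)
    {
        mmu->dev = dev;
        mmu->funcs = funcs;
    }

    /* A trivial backend: succeeds at attach, does nothing else. */
    static int dummy_attach(struct toy_mmu *mmu, const char **names, int cnt)
    {
        (void)mmu; (void)names;
        printf("attached %d port(s)\n", cnt);
        return 0;
    }

    static void dummy_detach(struct toy_mmu *mmu, const char **names, int cnt)
    {
        (void)mmu; (void)names; (void)cnt;
    }

    static void dummy_destroy(struct toy_mmu *mmu) { (void)mmu; }

    static const struct toy_mmu_funcs dummy_funcs = {
        .attach  = dummy_attach,
        .detach  = dummy_detach,
        .destroy = dummy_destroy,
    };

    int main(void)
    {
        struct toy_mmu mmu;
        const char *ports[] = { "port0" };

        toy_mmu_init(&mmu, NULL, &dummy_funcs);
        mmu.funcs->attach(&mmu, ports, 1);   /* callers go through the vtable */
        mmu.funcs->destroy(&mmu);
        return 0;
    }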
|
D | msm_iommu.c | 34 static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt) in msm_iommu_attach() argument 36 struct msm_iommu *iommu = to_msm_iommu(mmu); in msm_iommu_attach() 37 return iommu_attach_device(iommu->domain, mmu->dev); in msm_iommu_attach() 40 static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt) in msm_iommu_detach() argument 42 struct msm_iommu *iommu = to_msm_iommu(mmu); in msm_iommu_detach() 43 iommu_detach_device(iommu->domain, mmu->dev); in msm_iommu_detach() 46 static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova, in msm_iommu_map() argument 49 struct msm_iommu *iommu = to_msm_iommu(mmu); in msm_iommu_map() 85 static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova, in msm_iommu_unmap() argument 88 struct msm_iommu *iommu = to_msm_iommu(mmu); in msm_iommu_unmap() [all …]
|
D | msm_gem.c | 297 struct msm_mmu *mmu = priv->mmus[id]; in msm_gem_get_iova_locked() local 300 if (WARN_ON(!mmu)) in msm_gem_get_iova_locked() 304 ret = mmu->funcs->map(mmu, offset, msm_obj->sgt, in msm_gem_get_iova_locked() 526 struct msm_mmu *mmu = priv->mmus[id]; in msm_gem_free_object() local 527 if (mmu && msm_obj->domain[id].iova) { in msm_gem_free_object() 529 mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size); in msm_gem_free_object()
|
D | msm_gpu.c | 609 gpu->mmu = msm_iommu_new(&pdev->dev, iommu); in msm_gpu_init() 613 gpu->id = msm_register_mmu(drm, gpu->mmu); in msm_gpu_init() 649 if (gpu->mmu) in msm_gpu_cleanup() 650 gpu->mmu->funcs->destroy(gpu->mmu); in msm_gpu_cleanup()
|
D | msm_gpu.h | 98 struct msm_mmu *mmu; member
|
D | msm_drv.h | 165 int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
|
D | msm_drv.c | 38 int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu) in msm_register_mmu() argument 46 priv->mmus[idx] = mmu; in msm_register_mmu()
|
/linux-4.1.27/arch/metag/mm/ |
D | Makefile | 12 mmu-y := mmu-meta1.o 13 mmu-$(CONFIG_METAG_META21_MMU) := mmu-meta2.o 14 obj-y += $(mmu-y)
|
/linux-4.1.27/arch/powerpc/kvm/ |
D | book3s_32_mmu.c | 414 struct kvmppc_mmu *mmu = &vcpu->arch.mmu; in kvmppc_mmu_book3s_32_init() local 416 mmu->mtsrin = kvmppc_mmu_book3s_32_mtsrin; in kvmppc_mmu_book3s_32_init() 417 mmu->mfsrin = kvmppc_mmu_book3s_32_mfsrin; in kvmppc_mmu_book3s_32_init() 418 mmu->xlate = kvmppc_mmu_book3s_32_xlate; in kvmppc_mmu_book3s_32_init() 419 mmu->reset_msr = kvmppc_mmu_book3s_32_reset_msr; in kvmppc_mmu_book3s_32_init() 420 mmu->tlbie = kvmppc_mmu_book3s_32_tlbie; in kvmppc_mmu_book3s_32_init() 421 mmu->esid_to_vsid = kvmppc_mmu_book3s_32_esid_to_vsid; in kvmppc_mmu_book3s_32_init() 422 mmu->ea_to_vp = kvmppc_mmu_book3s_32_ea_to_vp; in kvmppc_mmu_book3s_32_init() 423 mmu->is_dcbz32 = kvmppc_mmu_book3s_32_is_dcbz32; in kvmppc_mmu_book3s_32_init() 425 mmu->slbmte = NULL; in kvmppc_mmu_book3s_32_init() [all …]
|
D | book3s_emulate.c | 155 if (vcpu->arch.mmu.mfsrin) { in kvmppc_core_emulate_op_pr() 157 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); in kvmppc_core_emulate_op_pr() 167 if (vcpu->arch.mmu.mfsrin) { in kvmppc_core_emulate_op_pr() 169 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); in kvmppc_core_emulate_op_pr() 175 vcpu->arch.mmu.mtsrin(vcpu, in kvmppc_core_emulate_op_pr() 180 vcpu->arch.mmu.mtsrin(vcpu, in kvmppc_core_emulate_op_pr() 189 vcpu->arch.mmu.tlbie(vcpu, addr, large); in kvmppc_core_emulate_op_pr() 223 if (!vcpu->arch.mmu.slbmte) in kvmppc_core_emulate_op_pr() 226 vcpu->arch.mmu.slbmte(vcpu, in kvmppc_core_emulate_op_pr() 231 if (!vcpu->arch.mmu.slbie) in kvmppc_core_emulate_op_pr() [all …]
|
D | book3s_64_mmu.c | 658 struct kvmppc_mmu *mmu = &vcpu->arch.mmu; in kvmppc_mmu_book3s_64_init() local 660 mmu->mfsrin = NULL; in kvmppc_mmu_book3s_64_init() 661 mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin; in kvmppc_mmu_book3s_64_init() 662 mmu->slbmte = kvmppc_mmu_book3s_64_slbmte; in kvmppc_mmu_book3s_64_init() 663 mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee; in kvmppc_mmu_book3s_64_init() 664 mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev; in kvmppc_mmu_book3s_64_init() 665 mmu->slbie = kvmppc_mmu_book3s_64_slbie; in kvmppc_mmu_book3s_64_init() 666 mmu->slbia = kvmppc_mmu_book3s_64_slbia; in kvmppc_mmu_book3s_64_init() 667 mmu->xlate = kvmppc_mmu_book3s_64_xlate; in kvmppc_mmu_book3s_64_init() 668 mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr; in kvmppc_mmu_book3s_64_init() [all …]
|
D | book3s_64_mmu_host.c | 117 vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); in kvmppc_mmu_map_page() 221 vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid); in kvmppc_mmu_unmap_page() 321 if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { in kvmppc_mmu_map_segment()
|
D | book3s_pr_papr.c | 109 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); in kvmppc_h_pr_remove() 191 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); in kvmppc_h_pr_bulk_remove() 234 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); in kvmppc_h_pr_protect()
|
D | book3s_pr.c | 430 if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) && in kvmppc_set_pvr_pr() 550 page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite); in kvmppc_handle_pagefault() 572 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); in kvmppc_handle_pagefault() 585 if (vcpu->arch.mmu.is_dcbz32(vcpu) && in kvmppc_handle_pagefault() 631 else if (vcpu->arch.mmu.is_dcbz32(vcpu) && in kvmppc_handle_pagefault() 930 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && in kvmppc_handle_exit_pr() 1287 vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv, in kvm_arch_vcpu_ioctl_set_sregs_pr() 1292 vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]); in kvm_arch_vcpu_ioctl_set_sregs_pr()
|
D | book3s_32_mmu_host.c | 169 vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); in kvmppc_mmu_map_page() 322 if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { in kvmppc_mmu_map_segment()
|
D | book3s_64_mmu_hv.c | 1629 struct kvmppc_mmu *mmu = &vcpu->arch.mmu; in kvmppc_mmu_book3s_hv_init() local 1633 mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate; in kvmppc_mmu_book3s_hv_init() 1634 mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr; in kvmppc_mmu_book3s_hv_init()
|
D | book3s.c | 129 vcpu->arch.mmu.reset_msr(vcpu); in kvmppc_inject_interrupt() 402 r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite); in kvmppc_xlate()
|
/linux-4.1.27/fs/ramfs/ |
D | Makefile | 7 file-mmu-y := file-nommu.o 8 file-mmu-$(CONFIG_MMU) := file-mmu.o 9 ramfs-objs += inode.o $(file-mmu-y)
|
/linux-4.1.27/drivers/gpu/drm/msm/mdp/mdp5/ |
D | mdp5_kms.c | 114 struct msm_mmu *mmu = mdp5_kms->mmu; in mdp5_destroy() local 118 if (mmu) { in mdp5_destroy() 119 mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); in mdp5_destroy() 120 mmu->funcs->destroy(mmu); in mdp5_destroy() 414 struct msm_mmu *mmu; in mdp5_kms_init() local 526 mmu = msm_iommu_new(&pdev->dev, config->platform.iommu); in mdp5_kms_init() 527 if (IS_ERR(mmu)) { in mdp5_kms_init() 528 ret = PTR_ERR(mmu); in mdp5_kms_init() 533 ret = mmu->funcs->attach(mmu, iommu_ports, in mdp5_kms_init() 537 mmu->funcs->destroy(mmu); in mdp5_kms_init() [all …]
|
D | mdp5_kms.h | 38 struct msm_mmu *mmu; member
|
/linux-4.1.27/arch/um/kernel/skas/ |
D | mmu.c | 139 struct mm_context *mmu = &mm->context; in destroy_context() local 147 if (mmu->id.u.pid < 2) { in destroy_context() 149 mmu->id.u.pid); in destroy_context() 152 os_kill_ptraced_process(mmu->id.u.pid, 1); in destroy_context() 154 free_page(mmu->id.stack); in destroy_context() 155 free_ldt(mmu); in destroy_context()
|
D | Makefile | 6 obj-y := clone.o mmu.o process.o syscall.o uaccess.o
|
/linux-4.1.27/arch/m68k/kernel/ |
D | setup_mm.c | 407 const char *cpu, *mmu, *fpu; in show_cpuinfo() local 456 mmu = "68851"; in show_cpuinfo() 458 mmu = "68030"; in show_cpuinfo() 460 mmu = "68040"; in show_cpuinfo() 462 mmu = "68060"; in show_cpuinfo() 464 mmu = "Sun-3"; in show_cpuinfo() 466 mmu = "Apollo"; in show_cpuinfo() 468 mmu = "ColdFire"; in show_cpuinfo() 470 mmu = "unknown"; in show_cpuinfo() 480 cpu, mmu, fpu, in show_cpuinfo()
|
D | setup_no.c | 271 char *cpu, *mmu, *fpu; in show_cpuinfo() local 275 mmu = "none"; in show_cpuinfo() 285 cpu, mmu, fpu, in show_cpuinfo()
|
/linux-4.1.27/arch/x86/kvm/ |
D | mmu.c | 1836 if (vcpu->arch.mmu.sync_page(vcpu, sp)) { in __kvm_sync_page() 1885 (vcpu->arch.mmu.sync_page(vcpu, s))) { in kvm_sync_pages() 2023 role = vcpu->arch.mmu.base_role; in kvm_mmu_get_page() 2029 if (!vcpu->arch.mmu.direct_map in kvm_mmu_get_page() 2030 && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { in kvm_mmu_get_page() 2085 iterator->shadow_addr = vcpu->arch.mmu.root_hpa; in shadow_walk_init() 2086 iterator->level = vcpu->arch.mmu.shadow_root_level; in shadow_walk_init() 2089 vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL && in shadow_walk_init() 2090 !vcpu->arch.mmu.direct_map) in shadow_walk_init() 2095 = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; in shadow_walk_init() [all …]
|
D | paging_tmpl.h | 131 static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level) in FNAME() 135 return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) | in FNAME() 136 ((mmu->bad_mt_xwr & (1ull << low6)) != 0); in FNAME() 148 static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in FNAME() 175 if (FNAME(is_rsvd_bits_set)(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) in FNAME() 208 struct kvm_mmu *mmu, in FNAME() 255 ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte); in FNAME() 269 struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in FNAME() 288 walker->level = mmu->root_level; in FNAME() 289 pte = mmu->get_cr3(vcpu); in FNAME() [all …]
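FNAME(is_rsvd_bits_set) in the excerpt flags a guest PTE as malformed when it hits a per-level reserved-bits mask selected by bit 7 of the PTE, or when its low six bits index a set bit in a bad memory-type/XWR bitmap. A toy version of that test is below; the mask values and toy_* names are placeholders, not what KVM actually programs.

    /*
     * Toy reserved-bits test modelled on the excerpt above: bit 7 of the
     * guest PTE and the paging level select a must-be-clear mask, and the
     * low 6 bits index a bitmap of invalid combinations.  Mask values are
     * placeholders.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TOY_LEVELS 4

    struct toy_mmu {
        uint64_t rsvd_bits_mask[2][TOY_LEVELS]; /* [bit7][level-1]           */
        uint64_t bad_mt_xwr;                    /* bitmap over gpte bits 5:0 */
    };

    static bool toy_is_rsvd_bits_set(const struct toy_mmu *mmu, uint64_t gpte,
                                     int level)
    {
        int bit7 = (gpte >> 7) & 1;
        int low6 = gpte & 0x3f;

        return (gpte & mmu->rsvd_bits_mask[bit7][level - 1]) != 0 ||
               (mmu->bad_mt_xwr & (1ull << low6)) != 0;
    }

    int main(void)
    {
        struct toy_mmu mmu = { { { 0 } }, 0 };

        /* Pretend bits 62:52 are reserved at level 1 when bit 7 is clear. */
        mmu.rsvd_bits_mask[0][0] = 0x7ff0000000000000ull;

        printf("%d\n", toy_is_rsvd_bits_set(&mmu, 1ull << 55, 1)); /* 1: reserved */
        printf("%d\n", toy_is_rsvd_bits_set(&mmu, 0x1003, 1));     /* 0: clean    */
        return 0;
    }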
|
D | mmu.h | 86 if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE)) in kvm_mmu_reload() 144 static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in permission_fault() argument 169 return (mmu->permissions[index] >> pte_access) & 1; in permission_fault()
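The last line of permission_fault() quoted above is a precomputed-bitmap lookup: for each page-fault error-code pattern the MMU keeps a small bitmap, and bit pte_access of that bitmap says whether the access should fault, so the hot path is one shift and a mask. The sketch below illustrates only that lookup; the table size, its contents and the toy_* names are invented for the demo.

    /*
     * Toy of the lookup at the end of permission_fault() above: one
     * precomputed bitmap per error-code pattern, bit pte_access of the
     * bitmap decides the outcome.  Table contents are invented.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TOY_NR_INDICES 16   /* one bitmap per error-code pattern (assumed) */

    struct toy_mmu {
        uint16_t permissions[TOY_NR_INDICES];
    };

    static bool toy_permission_fault(const struct toy_mmu *mmu, unsigned index,
                                     unsigned pte_access)
    {
        /* One shift and mask instead of re-deriving the rules on every fault. */
        return (mmu->permissions[index] >> pte_access) & 1;
    }

    int main(void)
    {
        struct toy_mmu mmu = { { 0 } };

        /* Say error-code pattern 2 faults when pte_access is 0. */
        mmu.permissions[2] = 1u << 0;

        printf("%d\n", toy_permission_fault(&mmu, 2, 0)); /* 1: faults  */
        printf("%d\n", toy_permission_fault(&mmu, 2, 3)); /* 0: allowed */
        return 0;
    }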
|
D | mmu_audit.c | 62 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in mmu_spte_walk() 65 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { in mmu_spte_walk() 66 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_spte_walk() 74 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_spte_walk() 125 "ent %llxn", vcpu->arch.mmu.root_level, pfn, in audit_mappings()
|
D | Makefile | 14 kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
|
D | x86.c | 429 vcpu->arch.mmu.inject_page_fault(vcpu, fault); in kvm_propagate_fault() 481 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in kvm_read_guest_page_mmu() argument 490 real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception); in kvm_read_guest_page_mmu() 510 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) in load_pdptrs() argument 516 u64 pdpte[ARRAY_SIZE(mmu->pdptrs)]; in load_pdptrs() 518 ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte, in load_pdptrs() 527 (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) { in load_pdptrs() 534 memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); in load_pdptrs() 4207 t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception); in translate_nested_gpa() 5172 if (!vcpu->arch.mmu.direct_map) { in reexecute_instruction() [all …]
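The load_pdptrs() lines in the excerpt show the PAE pattern of reading all four page-directory-pointer entries, rejecting any entry that is present but has reserved bits set, and caching the set only if every entry passes. A user-space toy of that pattern follows; the present bit, reserved mask and toy_* names are stand-ins, not KVM's real masks.

    /*
     * Toy of the PDPTE-loading pattern visible in load_pdptrs() above:
     * fetch four entries, reject any present entry with reserved bits
     * set, and only then cache the whole set.  Masks are stand-ins.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define TOY_PRESENT   0x1ull
    #define TOY_RSVD_MASK 0x7ff0000000000000ull  /* pretend bits 62:52 are reserved */

    struct toy_mmu {
        uint64_t pdptrs[4];
    };

    /* guest_pdpt[] stands in for the four PDPTEs read from guest memory. */
    static bool toy_load_pdptrs(struct toy_mmu *mmu, const uint64_t guest_pdpt[4])
    {
        uint64_t pdpte[4];
        int i;

        memcpy(pdpte, guest_pdpt, sizeof(pdpte));

        for (i = 0; i < 4; i++) {
            if ((pdpte[i] & TOY_PRESENT) && (pdpte[i] & TOY_RSVD_MASK))
                return false;           /* present entry with reserved bits set */
        }

        memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
        return true;
    }

    int main(void)
    {
        struct toy_mmu mmu = { { 0 } };
        uint64_t good[4] = { 0x1001, 0x2001, 0, 0 };
        uint64_t bad[4]  = { 0x1001 | (1ull << 55), 0, 0, 0 };

        printf("good: %d\n", toy_load_pdptrs(&mmu, good)); /* 1 */
        printf("bad:  %d\n", toy_load_pdptrs(&mmu, bad));  /* 0 */
        return 0;
    }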
|
D | vmx.c | 3473 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in vmx_flush_tlb() 3475 ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa)); in vmx_flush_tlb() 3504 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in ept_load_pdptrs() local 3511 vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]); in ept_load_pdptrs() 3512 vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]); in ept_load_pdptrs() 3513 vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]); in ept_load_pdptrs() 3514 vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]); in ept_load_pdptrs() 3520 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in ept_save_pdptrs() local 3523 mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0); in ept_save_pdptrs() 3524 mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1); in ept_save_pdptrs() [all …]
|
D | svm.c | 2008 vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3; in nested_svm_init_mmu_context() 2009 vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3; in nested_svm_init_mmu_context() 2010 vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr; in nested_svm_init_mmu_context() 2011 vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit; in nested_svm_init_mmu_context() 2012 vcpu->arch.mmu.shadow_root_level = get_npt_level(); in nested_svm_init_mmu_context() 2018 vcpu->arch.walk_mmu = &vcpu->arch.mmu; in nested_svm_uninit_mmu_context()
|
/linux-4.1.27/Documentation/devicetree/bindings/iommu/ |
D | arm,smmu.txt | 16 "arm,mmu-400" 17 "arm,mmu-401" 18 "arm,mmu-500" 37 - mmu-masters : A list of phandles to device nodes representing bus 69 mmu-masters = <&dma0 0xd01d 0xd01e>,
|
D | ti,omap-iommu.txt | 20 mmu_isp: mmu@480bd400 {
|
D | renesas,ipmmu-vmsa.txt | 29 ipmmu_mx: mmu@fe951000 {
|
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/ |
D | nv04.c | 63 struct nv04_mmu_priv *mmu = nv04_mmu(dmaobj); in nv04_dmaobj_bind() local 64 struct nvkm_gpuobj *pgt = mmu->vm->pgt[0].obj[0]; in nv04_dmaobj_bind() 89 struct nv04_mmu_priv *mmu = nv04_mmu(engine); in nv04_dmaobj_ctor() local 99 if (nv_object(mmu)->oclass == &nv04_mmu_oclass) in nv04_dmaobj_ctor()
|
/linux-4.1.27/drivers/gpu/drm/gma500/ |
D | psb_drv.c | 182 if (dev_priv->mmu) { in psb_driver_unload() 188 (dev_priv->mmu), in psb_driver_unload() 192 psb_mmu_driver_takedown(dev_priv->mmu); in psb_driver_unload() 193 dev_priv->mmu = NULL; in psb_driver_unload() 328 dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0); in psb_driver_load() 329 if (!dev_priv->mmu) in psb_driver_load() 332 dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0); in psb_driver_load() 342 ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu), in psb_driver_load() 348 psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0); in psb_driver_load()
|
D | Makefile | 15 mmu.o \
|
D | gtt.c | 262 psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu), in psb_gtt_pin() 304 psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu), in psb_gtt_unpin()
|
D | psb_drv.h | 468 struct psb_mmu_driver *mmu; member
|
/linux-4.1.27/arch/m32r/mm/ |
D | Makefile | 6 obj-y := init.o fault.o mmu.o extable.o ioremap.o cache.o page.o 8 obj-y := init.o fault-nommu.o mmu.o extable.o ioremap-nommu.o cache.o page.o
|
/linux-4.1.27/mm/ |
D | Makefile | 8 mmu-y := nommu.o 9 mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \ 14 mmu-$(CONFIG_MMU) += process_vm_access.o 24 debug.o $(mmu-y)
|
/linux-4.1.27/arch/sh/mm/ |
D | Makefile | 17 mmu-y := nommu.o extable_32.o 18 mmu-$(CONFIG_MMU) := extable_$(BITS).o fault.o gup.o ioremap.o kmap.o \ 21 obj-y += $(mmu-y)
|
/linux-4.1.27/drivers/gpu/drm/msm/mdp/mdp4/ |
D | mdp4_kms.c | 387 struct msm_mmu *mmu; in mdp4_kms_init() local 476 mmu = msm_iommu_new(&pdev->dev, config->iommu); in mdp4_kms_init() 477 if (IS_ERR(mmu)) { in mdp4_kms_init() 478 ret = PTR_ERR(mmu); in mdp4_kms_init() 481 ret = mmu->funcs->attach(mmu, iommu_ports, in mdp4_kms_init() 488 mmu = NULL; in mdp4_kms_init() 491 mdp4_kms->id = msm_register_mmu(dev, mmu); in mdp4_kms_init()
|
/linux-4.1.27/Documentation/virtual/kvm/ |
D | 00-INDEX | 13 mmu.txt 14 - the x86 kvm shadow mmu.
|
D | locking.txt | 15 the mmu-lock on x86. Currently, the page fault can be fast only if the 22 - SPTE_MMU_WRITEABLE means the gfn is writable on mmu. The bit is set when 23 the gfn is writable on guest mmu and it is not write-protected by shadow 114 if it can be updated out of mmu-lock, see spte_has_volatile_bits(), it means, 122 As mentioned before, the spte can be updated to writable out of mmu-lock on 127 Since the spte is "volatile" if it can be updated out of mmu-lock, we always 157 Comment: it is a spinlock since it is used in mmu notifier.
|
D | mmu.txt | 1 The x86 kvm shadow mmu 4 The mmu (in arch/x86/kvm, files mmu.[ch] and paging_tmpl.h) is responsible 5 for presenting a standard x86 mmu to the guest, while translating guest 8 The mmu code attempts to satisfy the following requirements: 11 on an emulated mmu except for timing (we attempt to comply 16 - performance: minimize the performance penalty imposed by the mmu 48 The mmu supports first-generation mmu hardware, which allows an atomic switch 51 it exposes is the traditional 2/3/4 level x86 mmu, with support for global 58 The primary job of the mmu is to program the processor's mmu to translate 72 number of required translations matches the hardware, the mmu operates in [all …]
|
D | api.txt | 3365 addresses of mmu-type-specific data structures. The "array_len" field is an 3379 For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV:
|
/linux-4.1.27/drivers/gpu/drm/nouveau/ |
D | nouveau_chan.c | 91 struct nvkm_mmu *mmu = nvxx_mmu(device); in nouveau_channel_prep() local 139 args.limit = cli->vm->mmu->limit - 1; in nouveau_channel_prep() 168 args.limit = mmu->limit - 1; in nouveau_channel_prep() 285 struct nvkm_mmu *mmu = nvxx_mmu(device); in nouveau_channel_init() local 298 args.limit = cli->vm->mmu->limit - 1; in nouveau_channel_init() 316 args.limit = cli->vm->mmu->limit - 1; in nouveau_channel_init() 328 args.limit = mmu->limit - 1; in nouveau_channel_init()
|
D | nouveau_ttm.c | 214 struct nvkm_mmu *mmu = nvxx_mmu(&drm->device); in nv04_gart_manager_init() local 215 struct nv04_mmu_priv *priv = (void *)mmu; in nv04_gart_manager_init()
|
D | nouveau_bo.c | 196 lpg_shift = drm->client.vm->mmu->lpg_shift; in nouveau_bo_new() 223 nvbo->page_shift = drm->client.vm->mmu->lpg_shift; in nouveau_bo_new() 1245 nvbo->page_shift != vma->vm->mmu->lpg_shift)) { in nouveau_bo_move_ntfy() 1644 nvbo->page_shift != vma->vm->mmu->lpg_shift)) in nouveau_bo_vma_add()
|
/linux-4.1.27/arch/sparc/kernel/ |
D | wuf.S | 265 LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %l5) ! read mmu-ctrl reg 266 SUN_PI_(lda [%g0] ASI_M_MMUREGS, %l5) ! read mmu-ctrl reg 281 LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %twin_tmp1) ! load mmu-ctrl again 282 SUN_PI_(lda [%g0] ASI_M_MMUREGS, %twin_tmp1) ! load mmu-ctrl again
|
/linux-4.1.27/drivers/gpu/drm/msm/adreno/ |
D | adreno_gpu.c | 308 struct msm_mmu *mmu; in adreno_gpu_init() local 347 mmu = gpu->mmu; in adreno_gpu_init() 348 if (mmu) { in adreno_gpu_init() 349 ret = mmu->funcs->attach(mmu, iommu_ports, in adreno_gpu_init()
|
D | a4xx_gpu.c | 584 if (!gpu->mmu) { in a4xx_gpu_init()
|
D | a3xx_gpu.c | 577 if (!gpu->mmu) { in a3xx_gpu_init()
|
/linux-4.1.27/arch/arm/lib/ |
D | Makefile | 18 mmu-y := clear_user.o copy_page.o getuser.o putuser.o \ 24 lib-$(CONFIG_MMU) += $(mmu-y)
|
/linux-4.1.27/Documentation/frv/ |
D | README.txt | 32 (*) mmu-layout.txt 41 MMU state on the FR451. See mmu-layout.txt for more information.
|
D | configuring.txt | 102 (*) defconfig-mmu 107 (*) defconfig-mmu-audio 113 (*) defconfig-mmu-fb 119 (*) defconfig-mmu-standalone
|
D | kernel-ABI.txt | 104 SCR0 MMU See mmu-layout.txt. 105 SCR1 MMU See mmu-layout.txt. 110 DAMR/IAMR MMU See mmu-layout.txt.
|
D | features.txt | 59 See mmu-layout.txt in this directory for a description of the normal linux
|
/linux-4.1.27/arch/xtensa/mm/ |
D | Makefile | 6 obj-$(CONFIG_MMU) += cache.o fault.o mmu.o tlb.o
|
/linux-4.1.27/arch/nios2/kernel/ |
D | cpuinfo.c | 75 cpuinfo.mmu = fcpu_has(cpu, "altr,has-mmu"); in setup_cpuinfo() 139 cpuinfo.mmu ? "present" : "none", in show_cpuinfo()
|
/linux-4.1.27/arch/frv/mm/ |
D | Makefile | 9 mmu-context.o dma-alloc.o elf-fdpic.o
|
/linux-4.1.27/arch/c6x/kernel/ |
D | setup.c | 78 const char *mmu; member 121 p->mmu = "none"; in get_cpuinfo() 470 p->core_id, p->mmu, p->fpu, in show_cpuinfo()
|
/linux-4.1.27/arch/arm64/mm/ |
D | Makefile | 3 ioremap.o mmap.o pgd.o mmu.o \
|
/linux-4.1.27/arch/unicore32/mm/ |
D | Makefile | 5 obj-y := extable.o fault.o init.o pgd.o mmu.o
|
D | proc-ucv2.S | 49 movc p0.c1, ip, #0 @ disable caches and mmu
|
/linux-4.1.27/arch/nios2/include/asm/ |
D | cpuinfo.h | 28 u32 mmu; member
|
/linux-4.1.27/Documentation/devicetree/bindings/nios2/ |
D | nios2.txt | 27 - altr,has-mmu: Specifies CPU support MMU support, should be 1. 61 altr,has-mmu = <1>;
|
/linux-4.1.27/arch/x86/xen/ |
D | Makefile | 13 obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
|
/linux-4.1.27/arch/frv/kernel/ |
D | Makefile | 6 heads-$(CONFIG_MMU) := head-mmu-fr451.o
|
/linux-4.1.27/arch/microblaze/boot/dts/ |
D | system.dts | 104 xlnx,mmu-dtlb-size = <0x4>; 105 xlnx,mmu-itlb-size = <0x2>; 106 xlnx,mmu-tlb-access = <0x3>; 107 xlnx,mmu-zones = <0x10>; 128 xlnx,use-mmu = <0x3>;
|
/linux-4.1.27/arch/arm/kvm/ |
D | Makefile | 21 obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
|
/linux-4.1.27/arch/microblaze/include/asm/ |
D | cpuinfo.h | 40 u32 mmu; member
|
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/ |
D | Kbuild | 14 include $(src)/nvkm/subdev/mmu/Kbuild
|
/linux-4.1.27/arch/arm/boot/dts/ |
D | r8a7794.dtsi | 684 ipmmu_sy0: mmu@e6280000 { 693 ipmmu_sy1: mmu@e6290000 { 701 ipmmu_ds: mmu@e6740000 { 709 ipmmu_mp: mmu@ec680000 { 717 ipmmu_mx: mmu@fe951000 { 725 ipmmu_gp: mmu@e62a0000 {
|
D | r8a7791.dtsi | 1428 ipmmu_sy0: mmu@e6280000 { 1437 ipmmu_sy1: mmu@e6290000 { 1445 ipmmu_ds: mmu@e6740000 { 1454 ipmmu_mp: mmu@ec680000 { 1462 ipmmu_mx: mmu@fe951000 { 1471 ipmmu_rt: mmu@ffc80000 { 1479 ipmmu_gp: mmu@e62a0000 {
|
D | r8a7790.dtsi | 1601 ipmmu_sy0: mmu@e6280000 { 1610 ipmmu_sy1: mmu@e6290000 { 1618 ipmmu_ds: mmu@e6740000 { 1627 ipmmu_mp: mmu@ec680000 { 1635 ipmmu_mx: mmu@fe951000 { 1644 ipmmu_rt: mmu@ffc80000 {
|
D | omap3.dtsi | 458 mmu_isp: mmu@480bd400 { 467 mmu_iva: mmu@5d000000 {
|
D | omap4.dtsi | 550 mmu_dsp: mmu@4a066000 { 557 mmu_ipu: mmu@55082000 {
|
D | omap5.dtsi | 611 mmu_dsp: mmu@4a066000 { 618 mmu_ipu: mmu@55082000 {
|
/linux-4.1.27/arch/arm64/kvm/ |
D | Makefile | 15 kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
|
/linux-4.1.27/arch/mn10300/mm/ |
D | Makefile | 28 init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
|
/linux-4.1.27/arch/cris/include/arch-v32/arch/hwregs/ |
D | Makefile | 58 REGDESC += $(BASEDIR)/core/cpu/mmu/doc/mmu_regs.r 121 mmu_defs_asm.h: $(BASEDIR)/core/cpu/mmu/doc/mmu_regs.r
|
/linux-4.1.27/arch/microblaze/kernel/cpu/ |
D | mb.c | 74 seq_printf(m, " MMU:\t\t%x\n", cpuinfo.mmu); in show_cpuinfo()
|
D | cpuinfo-pvr-full.c | 73 CI(mmu, USE_MMU); in set_cpuinfo_pvr_full()
|
D | cpuinfo-static.c | 119 ci->mmu = fcpu(cpu, "xlnx,use-mmu"); in set_cpuinfo_static()
|
/linux-4.1.27/drivers/gpu/drm/nouveau/include/nvkm/subdev/ |
D | mmu.h | 28 struct nvkm_mmu *mmu; member
|
/linux-4.1.27/arch/c6x/include/asm/ |
D | Kbuild | 30 generic-y += mmu.h
|
/linux-4.1.27/arch/arc/include/asm/ |
D | arcregs.h | 319 struct cpuinfo_arc_mmu mmu; member
|
/linux-4.1.27/arch/powerpc/boot/dts/fsl/ |
D | e500v2_power_isa.dtsi | 50 mmu-type = "power-embedded";
|
D | e500mc_power_isa.dtsi | 57 mmu-type = "power-embedded";
|
D | e5500_power_isa.dtsi | 58 mmu-type = "power-embedded";
|
D | e6500_power_isa.dtsi | 63 mmu-type = "power-embedded";
|
/linux-4.1.27/arch/unicore32/kernel/ |
D | sleep.S | 176 movc p0.c1, r5, #0 @ control reg, turn on mmu
|
/linux-4.1.27/arch/arm/mm/ |
D | Makefile | 9 mmap.o pgd.o mmu.o pageattr.o
|
D | proc-xsc3.S | 479 orr r0, r0, r6 @ ..VI Z..S .... .C.M (mmu)
|
/linux-4.1.27/arch/nios2/boot/dts/ |
D | 3c120_devboard.dts | 54 altr,has-mmu = <1>;
|
/linux-4.1.27/arch/arm/kernel/ |
D | head.S | 138 @ mmu has been enabled 383 sub lr, r4, r5 @ mmu has been enabled
|
/linux-4.1.27/arch/x86/include/asm/ |
D | kvm_host.h | 381 struct kvm_mmu mmu; member 889 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3); 982 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
|
/linux-4.1.27/arch/blackfin/kernel/ |
D | setup.c | 1272 char *cpu, *mmu, *fpu, *vendor, *cache; local 1280 mmu = "none";
|
/linux-4.1.27/Documentation/cris/ |
D | README | 153 mmu : yes
|
/linux-4.1.27/arch/powerpc/include/asm/ |
D | kvm_host.h | 405 struct kvmppc_mmu mmu; member
|
/linux-4.1.27/arch/sparc/mm/ |
D | init_64.c | 575 unsigned long mmu) in hypervisor_tlb_lock() argument 577 unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu); in hypervisor_tlb_lock() 581 "errors with %lx\n", vaddr, 0, pte, mmu, ret); in hypervisor_tlb_lock()
|
/linux-4.1.27/arch/cris/arch-v10/ |
D | README.mm | 153 [ References: asm/pgtable.h, asm/page.h, asm/mmu.h ]
|
/linux-4.1.27/Documentation/ |
D | 00-INDEX | 339 - documentation about no-mmu memory mapping support.
|
/linux-4.1.27/arch/arm/boot/compressed/ |
D | head.S | 809 orr r0, r0, #0x000d @ Write buffer, mmu
|
/linux-4.1.27/arch/sh/kernel/cpu/sh5/ |
D | entry.S | 615 ! the mmu back on
|