| /linux-4.4.14/arch/cris/arch-v32/mm/ |
| D | init.c |
      50  mmu_config = ( REG_STATE(mmu, rw_mm_cfg, we, on) |   in cris_mmu_init()
      51  REG_STATE(mmu, rw_mm_cfg, acc, on) |   in cris_mmu_init()
      52  REG_STATE(mmu, rw_mm_cfg, ex, on) |   in cris_mmu_init()
      53  REG_STATE(mmu, rw_mm_cfg, inv, on) |   in cris_mmu_init()
      55  REG_STATE(mmu, rw_mm_cfg, seg_f, page) |   in cris_mmu_init()
      56  REG_STATE(mmu, rw_mm_cfg, seg_e, page) |   in cris_mmu_init()
      57  REG_STATE(mmu, rw_mm_cfg, seg_d, linear) |   in cris_mmu_init()
      59  REG_STATE(mmu, rw_mm_cfg, seg_f, linear) |   in cris_mmu_init()
      60  REG_STATE(mmu, rw_mm_cfg, seg_e, linear) |   in cris_mmu_init()
      61  REG_STATE(mmu, rw_mm_cfg, seg_d, page) |   in cris_mmu_init()
      [all …]
|
| D | tlb.c |
      19  tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, val); \
      48  int mmu;   in __flush_tlb_all() local
      59  for (mmu = 1; mmu <= 2; mmu++) {   in __flush_tlb_all()
      60  SUPP_BANK_SEL(mmu); /* Select the MMU */   in __flush_tlb_all()
      63  mmu_tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, i);   in __flush_tlb_all()
      65  mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid, INVALID_PAGEID)   in __flush_tlb_all()
      66  | REG_FIELD(mmu, rw_mm_tlb_hi, vpn, i & 0xf));   in __flush_tlb_all()
      82  int mmu;   in __flush_tlb_mm() local
      96  for (mmu = 1; mmu <= 2; mmu++) {   in __flush_tlb_mm()
      97  SUPP_BANK_SEL(mmu);   in __flush_tlb_mm()
      [all …]
|
| D | mmu.S |
      39  .macro MMU_BUS_FAULT_HANDLER handler, mmu, we, ex
      44  move \mmu, $srs   ; Select MMU support register bank
      82  .macro MMU_REFILL_HANDLER handler, mmu
      95  move \mmu, $srs   ; Select MMU support register bank
     185  move \mmu, $srs
|
| D | Makefile | 3 obj-y += mmu.o init.o tlb.o intmem.o
|
| /linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ |
| D | base.c |
      33  struct nvkm_mmu *mmu = vm->mmu;   in nvkm_vm_map_at() local
      35  int big = vma->node->type != mmu->func->spg_shift;   in nvkm_vm_map_at()
      38  u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;   in nvkm_vm_map_at()
      39  u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;   in nvkm_vm_map_at()
      40  u32 max = 1 << (mmu->func->pgt_bits - bits);   in nvkm_vm_map_at()
      56  mmu->func->map(vma, pgt, node, pte, len, phys, delta);   in nvkm_vm_map_at()
      70  mmu->func->flush(vm);   in nvkm_vm_map_at()
      78  struct nvkm_mmu *mmu = vm->mmu;   in nvkm_vm_map_sg_table() local
      79  int big = vma->node->type != mmu->func->spg_shift;   in nvkm_vm_map_sg_table()
      83  u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;   in nvkm_vm_map_sg_table()
      [all …]
|
| D | nv04.c |
      78  struct nv04_mmu *mmu = nv04_mmu(base);   in nv04_mmu_oneinit() local
      79  struct nvkm_device *device = mmu->base.subdev.device;   in nv04_mmu_oneinit()
      83  ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096, NULL,   in nv04_mmu_oneinit()
      84  &mmu->vm);   in nv04_mmu_oneinit()
      91  mmu->vm->pgt[0].mem[0] = dma;   in nv04_mmu_oneinit()
      92  mmu->vm->pgt[0].refcount[0] = 1;   in nv04_mmu_oneinit()
     106  struct nv04_mmu *mmu = nv04_mmu(base);   in nv04_mmu_dtor() local
     107  struct nvkm_device *device = mmu->base.subdev.device;   in nv04_mmu_dtor()
     108  if (mmu->vm) {   in nv04_mmu_dtor()
     109  nvkm_memory_del(&mmu->vm->pgt[0].mem[0]);   in nv04_mmu_dtor()
      [all …]
|
| D | nv44.c |
      87  struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);   in nv44_vm_map_sg() local
      95  nv44_vm_fill(pgt, mmu->null, list, pte, part);   in nv44_vm_map_sg()
     112  nv44_vm_fill(pgt, mmu->null, list, pte, cnt);   in nv44_vm_map_sg()
     119  struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);   in nv44_vm_unmap() local
     125  nv44_vm_fill(pgt, mmu->null, NULL, pte, part);   in nv44_vm_unmap()
     139  nv44_vm_fill(pgt, mmu->null, NULL, pte, cnt);   in nv44_vm_unmap()
     146  struct nv04_mmu *mmu = nv04_mmu(vm->mmu);   in nv44_vm_flush() local
     147  struct nvkm_device *device = mmu->base.subdev.device;   in nv44_vm_flush()
     148  nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE);   in nv44_vm_flush()
     164  struct nv04_mmu *mmu = nv04_mmu(base);   in nv44_mmu_oneinit() local
      [all …]
|
| D | nv41.c |
      71  struct nv04_mmu *mmu = nv04_mmu(vm->mmu);   in nv41_vm_flush() local
      72  struct nvkm_device *device = mmu->base.subdev.device;   in nv41_vm_flush()
      74  mutex_lock(&mmu->base.subdev.mutex);   in nv41_vm_flush()
      81  mutex_unlock(&mmu->base.subdev.mutex);   in nv41_vm_flush()
      91  struct nv04_mmu *mmu = nv04_mmu(base);   in nv41_mmu_oneinit() local
      92  struct nvkm_device *device = mmu->base.subdev.device;   in nv41_mmu_oneinit()
      95  ret = nvkm_vm_create(&mmu->base, 0, NV41_GART_SIZE, 0, 4096, NULL,   in nv41_mmu_oneinit()
      96  &mmu->vm);   in nv41_mmu_oneinit()
     102  &mmu->vm->pgt[0].mem[0]);   in nv41_mmu_oneinit()
     103  mmu->vm->pgt[0].refcount[0] = 1;   in nv41_mmu_oneinit()
      [all …]
|
| D | Kbuild | 1 nvkm-y += nvkm/subdev/mmu/base.o 2 nvkm-y += nvkm/subdev/mmu/nv04.o 3 nvkm-y += nvkm/subdev/mmu/nv41.o 4 nvkm-y += nvkm/subdev/mmu/nv44.o 5 nvkm-y += nvkm/subdev/mmu/nv50.o 6 nvkm-y += nvkm/subdev/mmu/gf100.o
|
| D | gf100.c | 112 struct nvkm_ltc *ltc = vma->vm->mmu->subdev.device->ltc; in gf100_vm_map() 164 struct nvkm_mmu *mmu = vm->mmu; in gf100_vm_flush() local 165 struct nvkm_device *device = mmu->subdev.device; in gf100_vm_flush() 173 mutex_lock(&mmu->subdev.mutex); in gf100_vm_flush() 192 mutex_unlock(&mmu->subdev.mutex); in gf100_vm_flush() 196 gf100_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset, in gf100_vm_create() argument 199 return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, key, pvm); in gf100_vm_create()
|
| D | nv50.c | 80 struct nvkm_ram *ram = vma->vm->mmu->subdev.device->fb->ram; in nv50_vm_map() 158 struct nvkm_mmu *mmu = vm->mmu; in nv50_vm_flush() local 159 struct nvkm_subdev *subdev = &mmu->subdev; in nv50_vm_flush() 202 nv50_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset, in nv50_vm_create() argument 205 u32 block = (1 << (mmu->func->pgt_bits + 12)); in nv50_vm_create() 209 return nvkm_vm_create(mmu, offset, length, mm_offset, block, key, pvm); in nv50_vm_create()
|
| /linux-4.4.14/drivers/iommu/ |
| D | ipmmu-vmsa.c |
      40  struct ipmmu_vmsa_device *mmu;   member
      51  struct ipmmu_vmsa_device *mmu;   member
     188  static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)   in ipmmu_read() argument
     190  return ioread32(mmu->base + offset);   in ipmmu_read()
     193  static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,   in ipmmu_write() argument
     196  iowrite32(data, mmu->base + offset);   in ipmmu_write()
     201  return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg);   in ipmmu_ctx_read()
     207  ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data);   in ipmmu_ctx_write()
     222  dev_err_ratelimited(domain->mmu->dev,   in ipmmu_tlb_sync()
     247  struct ipmmu_vmsa_device *mmu = domain->mmu;   in ipmmu_utlb_enable() local
      [all …]
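The ipmmu-vmsa.c excerpt above illustrates a layered register-accessor pattern: thin ioread32()/iowrite32() wrappers for global registers, with per-context accessors on top that add a context_id-based stride. The following is only a minimal standalone sketch of that layering; the example_* names, the stride value and the struct fields are hypothetical, and only the ioread32()/iowrite32() calls and the context_id * stride arithmetic are taken from the excerpt.

/* Sketch only: hypothetical example_* types mirroring the accessor layering
 * visible in the ipmmu-vmsa.c excerpt; not the driver's real definitions. */
#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_CTX_SIZE	0x40	/* assumed per-context register stride */

struct example_mmu_device {
	void __iomem *base;		/* ioremapped MMIO window */
};

struct example_mmu_domain {
	struct example_mmu_device *mmu;
	unsigned int context_id;
};

/* Global registers: plain offset from the ioremapped base. */
static u32 example_read(struct example_mmu_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void example_write(struct example_mmu_device *mmu, unsigned int offset,
			  u32 data)
{
	iowrite32(data, mmu->base + offset);
}

/* Per-context registers: same accessors, offset by context_id * stride,
 * mirroring ipmmu_ctx_read()/ipmmu_ctx_write() in the excerpt above. */
static u32 example_ctx_read(struct example_mmu_domain *dom, unsigned int reg)
{
	return example_read(dom->mmu,
			    dom->context_id * EXAMPLE_CTX_SIZE + reg);
}

static void example_ctx_write(struct example_mmu_domain *dom,
			      unsigned int reg, u32 data)
{
	example_write(dom->mmu,
		      dom->context_id * EXAMPLE_CTX_SIZE + reg, data);
}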
|
| /linux-4.4.14/arch/cris/arch-v32/kernel/ |
| D | head.S |
      62  move.d REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 8) \
      63  | REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 4) \
      64  | REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 5) \
      65  | REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb), $r0
      67  move.d REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 8) \
      68  | REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 4) \
      69  | REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb), $r0
      73  move.d REG_FIELD(mmu, rw_mm_kbase_lo, base_4, 4) \
      74  | REG_FIELD(mmu, rw_mm_kbase_lo, base_0, 0), $r1
      90  move.d REG_STATE(mmu, rw_mm_cfg, we, on) \
      [all …]
|
| /linux-4.4.14/arch/arc/mm/ |
| D | tlb.c |
     251  struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;   in local_flush_tlb_all() local
     254  int num_tlb = mmu->sets * mmu->ways;   in local_flush_tlb_all()
     727  struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;   in read_decode_mmu_bcr() local
     759  mmu->ver = (tmp >> 24);   in read_decode_mmu_bcr()
     761  if (mmu->ver <= 2) {   in read_decode_mmu_bcr()
     763  mmu->pg_sz_k = TO_KB(0x2000);   in read_decode_mmu_bcr()
     764  mmu->sets = 1 << mmu2->sets;   in read_decode_mmu_bcr()
     765  mmu->ways = 1 << mmu2->ways;   in read_decode_mmu_bcr()
     766  mmu->u_dtlb = mmu2->u_dtlb;   in read_decode_mmu_bcr()
     767  mmu->u_itlb = mmu2->u_itlb;   in read_decode_mmu_bcr()
      [all …]
|
| D | tlbex.S | 253 ; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu
|
| /linux-4.4.14/drivers/gpu/drm/msm/ |
| D | msm_mmu.h |
      24  int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
      25  void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
      26  int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
      28  int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
      30  void (*destroy)(struct msm_mmu *mmu);
      38  static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,   in msm_mmu_init() argument
      41  mmu->dev = dev;   in msm_mmu_init()
      42  mmu->funcs = funcs;   in msm_mmu_init()
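The msm_mmu.h excerpt above is the MSM DRM driver's backend interface: a table of function pointers (attach/detach/map/unmap/destroy) plus msm_mmu_init() to bind it to a struct device, which the msm_iommu.c entry below then implements on top of the IOMMU API. The sketch that follows is a hedged illustration of how a backend plugs into this interface, not kernel code: the my_mmu_* names are hypothetical, the funcs-table type name is assumed to be struct msm_mmu_funcs, the map/unmap callbacks are left unset because their full prototypes are truncated in the excerpt, and only msm_mmu_init() and the dev/funcs fields visible above are taken from the source.

/* Hypothetical msm_mmu backend skeleton (sketch only). */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "msm_mmu.h"

struct my_mmu {
	struct msm_mmu base;	/* embedded base, as msm_iommu.c does */
};
#define to_my_mmu(x) container_of(x, struct my_mmu, base)

static int my_mmu_attach(struct msm_mmu *mmu, const char **names, int cnt)
{
	return 0;		/* nothing to claim for this dummy backend */
}

static void my_mmu_detach(struct msm_mmu *mmu, const char **names, int cnt)
{
}

static void my_mmu_destroy(struct msm_mmu *mmu)
{
	kfree(to_my_mmu(mmu));
}

static const struct msm_mmu_funcs my_mmu_funcs = {
	.attach  = my_mmu_attach,
	.detach  = my_mmu_detach,
	/* .map / .unmap omitted: their full prototypes are truncated above */
	.destroy = my_mmu_destroy,
};

struct msm_mmu *my_mmu_new(struct device *dev)
{
	struct my_mmu *m = kzalloc(sizeof(*m), GFP_KERNEL);

	if (!m)
		return ERR_PTR(-ENOMEM);
	msm_mmu_init(&m->base, dev, &my_mmu_funcs);	/* sets dev and funcs */
	return &m->base;
}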
|
| D | msm_iommu.c |
      34  static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)   in msm_iommu_attach() argument
      36  struct msm_iommu *iommu = to_msm_iommu(mmu);   in msm_iommu_attach()
      37  return iommu_attach_device(iommu->domain, mmu->dev);   in msm_iommu_attach()
      40  static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)   in msm_iommu_detach() argument
      42  struct msm_iommu *iommu = to_msm_iommu(mmu);   in msm_iommu_detach()
      43  iommu_detach_device(iommu->domain, mmu->dev);   in msm_iommu_detach()
      46  static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,   in msm_iommu_map() argument
      49  struct msm_iommu *iommu = to_msm_iommu(mmu);   in msm_iommu_map()
      85  static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,   in msm_iommu_unmap() argument
      88  struct msm_iommu *iommu = to_msm_iommu(mmu);   in msm_iommu_unmap()
      [all …]
|
| D | msm_gem.c | 297 struct msm_mmu *mmu = priv->mmus[id]; in msm_gem_get_iova_locked() local 300 if (WARN_ON(!mmu)) in msm_gem_get_iova_locked() 304 ret = mmu->funcs->map(mmu, offset, msm_obj->sgt, in msm_gem_get_iova_locked() 525 struct msm_mmu *mmu = priv->mmus[id]; in msm_gem_free_object() local 526 if (mmu && msm_obj->domain[id].iova) { in msm_gem_free_object() 528 mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size); in msm_gem_free_object()
|
| D | msm_gpu.c | 653 gpu->mmu = msm_iommu_new(&pdev->dev, iommu); in msm_gpu_init() 654 if (IS_ERR(gpu->mmu)) { in msm_gpu_init() 655 ret = PTR_ERR(gpu->mmu); in msm_gpu_init() 657 gpu->mmu = NULL; in msm_gpu_init() 665 gpu->id = msm_register_mmu(drm, gpu->mmu); in msm_gpu_init() 701 if (gpu->mmu) in msm_gpu_cleanup() 702 gpu->mmu->funcs->destroy(gpu->mmu); in msm_gpu_cleanup()
|
| D | msm_gpu.h | 98 struct msm_mmu *mmu; member
|
| D | msm_drv.h | 184 int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
|
| D | msm_drv.c | 36 int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu) in msm_register_mmu() argument 44 priv->mmus[idx] = mmu; in msm_register_mmu()
|
| /linux-4.4.14/arch/metag/mm/ |
| D | Makefile | 12 mmu-y := mmu-meta1.o 13 mmu-$(CONFIG_METAG_META21_MMU) := mmu-meta2.o 14 obj-y += $(mmu-y)
|
| /linux-4.4.14/arch/powerpc/kvm/ |
| D | book3s_32_mmu.c |
     414  struct kvmppc_mmu *mmu = &vcpu->arch.mmu;   in kvmppc_mmu_book3s_32_init() local
     416  mmu->mtsrin = kvmppc_mmu_book3s_32_mtsrin;   in kvmppc_mmu_book3s_32_init()
     417  mmu->mfsrin = kvmppc_mmu_book3s_32_mfsrin;   in kvmppc_mmu_book3s_32_init()
     418  mmu->xlate = kvmppc_mmu_book3s_32_xlate;   in kvmppc_mmu_book3s_32_init()
     419  mmu->reset_msr = kvmppc_mmu_book3s_32_reset_msr;   in kvmppc_mmu_book3s_32_init()
     420  mmu->tlbie = kvmppc_mmu_book3s_32_tlbie;   in kvmppc_mmu_book3s_32_init()
     421  mmu->esid_to_vsid = kvmppc_mmu_book3s_32_esid_to_vsid;   in kvmppc_mmu_book3s_32_init()
     422  mmu->ea_to_vp = kvmppc_mmu_book3s_32_ea_to_vp;   in kvmppc_mmu_book3s_32_init()
     423  mmu->is_dcbz32 = kvmppc_mmu_book3s_32_is_dcbz32;   in kvmppc_mmu_book3s_32_init()
     425  mmu->slbmte = NULL;   in kvmppc_mmu_book3s_32_init()
      [all …]
|
| D | book3s_emulate.c |
     156  if (vcpu->arch.mmu.mfsrin) {   in kvmppc_core_emulate_op_pr()
     158  sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);   in kvmppc_core_emulate_op_pr()
     168  if (vcpu->arch.mmu.mfsrin) {   in kvmppc_core_emulate_op_pr()
     170  sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);   in kvmppc_core_emulate_op_pr()
     176  vcpu->arch.mmu.mtsrin(vcpu,   in kvmppc_core_emulate_op_pr()
     181  vcpu->arch.mmu.mtsrin(vcpu,   in kvmppc_core_emulate_op_pr()
     190  vcpu->arch.mmu.tlbie(vcpu, addr, large);   in kvmppc_core_emulate_op_pr()
     224  if (!vcpu->arch.mmu.slbmte)   in kvmppc_core_emulate_op_pr()
     227  vcpu->arch.mmu.slbmte(vcpu,   in kvmppc_core_emulate_op_pr()
     232  if (!vcpu->arch.mmu.slbie)   in kvmppc_core_emulate_op_pr()
      [all …]
|
| D | book3s_64_mmu.c |
     658  struct kvmppc_mmu *mmu = &vcpu->arch.mmu;   in kvmppc_mmu_book3s_64_init() local
     660  mmu->mfsrin = NULL;   in kvmppc_mmu_book3s_64_init()
     661  mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin;   in kvmppc_mmu_book3s_64_init()
     662  mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;   in kvmppc_mmu_book3s_64_init()
     663  mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;   in kvmppc_mmu_book3s_64_init()
     664  mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;   in kvmppc_mmu_book3s_64_init()
     665  mmu->slbie = kvmppc_mmu_book3s_64_slbie;   in kvmppc_mmu_book3s_64_init()
     666  mmu->slbia = kvmppc_mmu_book3s_64_slbia;   in kvmppc_mmu_book3s_64_init()
     667  mmu->xlate = kvmppc_mmu_book3s_64_xlate;   in kvmppc_mmu_book3s_64_init()
     668  mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr;   in kvmppc_mmu_book3s_64_init()
      [all …]
|
| D | book3s_64_mmu_host.c | 118 vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); in kvmppc_mmu_map_page() 222 vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid); in kvmppc_mmu_unmap_page() 322 if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { in kvmppc_mmu_map_segment()
|
| D | book3s_pr_papr.c | 109 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); in kvmppc_h_pr_remove() 191 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); in kvmppc_h_pr_bulk_remove() 234 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); in kvmppc_h_pr_protect()
|
| D | book3s_pr.c | 430 if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) && in kvmppc_set_pvr_pr() 550 page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite); in kvmppc_handle_pagefault() 572 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); in kvmppc_handle_pagefault() 585 if (vcpu->arch.mmu.is_dcbz32(vcpu) && in kvmppc_handle_pagefault() 631 else if (vcpu->arch.mmu.is_dcbz32(vcpu) && in kvmppc_handle_pagefault() 930 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && in kvmppc_handle_exit_pr() 1287 vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv, in kvm_arch_vcpu_ioctl_set_sregs_pr() 1292 vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]); in kvm_arch_vcpu_ioctl_set_sregs_pr()
|
| D | book3s_32_mmu_host.c | 170 vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); in kvmppc_mmu_map_page() 323 if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { in kvmppc_mmu_map_segment()
|
| D | book3s_64_mmu_hv.c | 1636 struct kvmppc_mmu *mmu = &vcpu->arch.mmu; in kvmppc_mmu_book3s_hv_init() local 1640 mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate; in kvmppc_mmu_book3s_hv_init() 1641 mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr; in kvmppc_mmu_book3s_hv_init()
|
| D | book3s.c | 130 vcpu->arch.mmu.reset_msr(vcpu); in kvmppc_inject_interrupt() 404 r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite); in kvmppc_xlate()
|
| /linux-4.4.14/fs/ramfs/ |
| D | Makefile | 7 file-mmu-y := file-nommu.o 8 file-mmu-$(CONFIG_MMU) := file-mmu.o 9 ramfs-objs += inode.o $(file-mmu-y)
|
| /linux-4.4.14/drivers/gpu/drm/msm/mdp/mdp5/ |
| D | mdp5_kms.c | 133 struct msm_mmu *mmu = mdp5_kms->mmu; in mdp5_destroy() local 137 if (mmu) { in mdp5_destroy() 138 mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); in mdp5_destroy() 139 mmu->funcs->destroy(mmu); in mdp5_destroy() 477 struct msm_mmu *mmu; in mdp5_kms_init() local 598 mmu = msm_iommu_new(&pdev->dev, config->platform.iommu); in mdp5_kms_init() 599 if (IS_ERR(mmu)) { in mdp5_kms_init() 600 ret = PTR_ERR(mmu); in mdp5_kms_init() 606 ret = mmu->funcs->attach(mmu, iommu_ports, in mdp5_kms_init() 610 mmu->funcs->destroy(mmu); in mdp5_kms_init() [all …]
|
| D | mdp5_kms.h | 40 struct msm_mmu *mmu; member
|
| /linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/device/ |
| D | base.c | 87 .mmu = nv04_mmu_new, 108 .mmu = nv04_mmu_new, 130 .mmu = nv04_mmu_new, 150 .mmu = nv04_mmu_new, 172 .mmu = nv04_mmu_new, 194 .mmu = nv04_mmu_new, 216 .mmu = nv04_mmu_new, 238 .mmu = nv04_mmu_new, 260 .mmu = nv04_mmu_new, 282 .mmu = nv04_mmu_new, [all …]
|
| /linux-4.4.14/arch/um/kernel/skas/ |
| D | mmu.c | 141 struct mm_context *mmu = &mm->context; in destroy_context() local 149 if (mmu->id.u.pid < 2) { in destroy_context() 151 mmu->id.u.pid); in destroy_context() 154 os_kill_ptraced_process(mmu->id.u.pid, 1); in destroy_context() 156 free_page(mmu->id.stack); in destroy_context() 157 free_ldt(mmu); in destroy_context()
|
| D | Makefile | 6 obj-y := clone.o mmu.o process.o syscall.o uaccess.o
|
| /linux-4.4.14/arch/m68k/kernel/ |
| D | setup_mm.c | 407 const char *cpu, *mmu, *fpu; in show_cpuinfo() local 456 mmu = "68851"; in show_cpuinfo() 458 mmu = "68030"; in show_cpuinfo() 460 mmu = "68040"; in show_cpuinfo() 462 mmu = "68060"; in show_cpuinfo() 464 mmu = "Sun-3"; in show_cpuinfo() 466 mmu = "Apollo"; in show_cpuinfo() 468 mmu = "ColdFire"; in show_cpuinfo() 470 mmu = "unknown"; in show_cpuinfo() 480 cpu, mmu, fpu, in show_cpuinfo()
|
| D | setup_no.c | 274 char *cpu, *mmu, *fpu; in show_cpuinfo() local 278 mmu = "none"; in show_cpuinfo() 288 cpu, mmu, fpu, in show_cpuinfo()
|
| /linux-4.4.14/arch/x86/kvm/ |
| D | mmu.c |
    1907  if (vcpu->arch.mmu.sync_page(vcpu, sp)) {   in __kvm_sync_page()
    1956  (vcpu->arch.mmu.sync_page(vcpu, s))) {   in kvm_sync_pages()
    2094  role = vcpu->arch.mmu.base_role;   in kvm_mmu_get_page()
    2100  if (!vcpu->arch.mmu.direct_map   in kvm_mmu_get_page()
    2101  && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {   in kvm_mmu_get_page()
    2156  iterator->shadow_addr = vcpu->arch.mmu.root_hpa;   in shadow_walk_init()
    2157  iterator->level = vcpu->arch.mmu.shadow_root_level;   in shadow_walk_init()
    2160  vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL &&   in shadow_walk_init()
    2161  !vcpu->arch.mmu.direct_map)   in shadow_walk_init()
    2166  = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];   in shadow_walk_init()
      [all …]
|
| D | paging_tmpl.h |
     140  static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,   in FNAME()
     167  if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))   in FNAME()
     200  struct kvm_mmu *mmu,   in FNAME()
     247  ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);   in FNAME()
     261  struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,   in FNAME()
     280  walker->level = mmu->root_level;   in FNAME()
     281  pte = mmu->get_cr3(vcpu);   in FNAME()
     285  pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);   in FNAME()
     314  real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),   in FNAME()
     348  if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) {   in FNAME()
      [all …]
|
| D | mmu.h | 89 if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE)) in kvm_mmu_reload() 147 static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in permission_fault() argument 172 return (mmu->permissions[index] >> pte_access) & 1; in permission_fault()
|
| D | mmu_audit.c | 62 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in mmu_spte_walk() 65 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { in mmu_spte_walk() 66 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_spte_walk() 74 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_spte_walk() 125 "ent %llxn", vcpu->arch.mmu.root_level, pfn, in audit_mappings()
|
| D | Makefile | 14 kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
|
| D | x86.c |
     439  vcpu->arch.mmu.inject_page_fault(vcpu, fault);   in kvm_propagate_fault()
     491  int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,   in kvm_read_guest_page_mmu() argument
     500  real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);   in kvm_read_guest_page_mmu()
     520  int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)   in load_pdptrs() argument
     526  u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];   in load_pdptrs()
     528  ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,   in load_pdptrs()
     538  vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {   in load_pdptrs()
     545  memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));   in load_pdptrs()
    4116  t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception);   in translate_nested_gpa()
    5120  if (!vcpu->arch.mmu.direct_map) {   in reexecute_instruction()
      [all …]
|
| D | vmx.c |
    3610  if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))   in __vmx_flush_tlb()
    3612  ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));   in __vmx_flush_tlb()
    3646  struct kvm_mmu *mmu = vcpu->arch.walk_mmu;   in ept_load_pdptrs() local
    3653  vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);   in ept_load_pdptrs()
    3654  vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);   in ept_load_pdptrs()
    3655  vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);   in ept_load_pdptrs()
    3656  vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);   in ept_load_pdptrs()
    3662  struct kvm_mmu *mmu = vcpu->arch.walk_mmu;   in ept_save_pdptrs() local
    3665  mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);   in ept_save_pdptrs()
    3666  mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);   in ept_save_pdptrs()
      [all …]
|
| D | svm.c | 1924 vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3; in nested_svm_init_mmu_context() 1925 vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3; in nested_svm_init_mmu_context() 1926 vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr; in nested_svm_init_mmu_context() 1927 vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit; in nested_svm_init_mmu_context() 1928 vcpu->arch.mmu.shadow_root_level = get_npt_level(); in nested_svm_init_mmu_context() 1929 reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu); in nested_svm_init_mmu_context() 1935 vcpu->arch.walk_mmu = &vcpu->arch.mmu; in nested_svm_uninit_mmu_context()
|
| /linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/dma/ |
| D | usernv04.c | 52 struct nv04_mmu *mmu = nv04_mmu(device->mmu); in nv04_dmaobj_bind() local 53 struct nvkm_memory *pgt = mmu->vm->pgt[0].mem[0]; in nv04_dmaobj_bind() 98 if (device->mmu->func == &nv04_mmu) in nv04_dmaobj_new()
|
| /linux-4.4.14/Documentation/devicetree/bindings/iommu/ |
| D | arm,smmu.txt | 16 "arm,mmu-400" 17 "arm,mmu-401" 18 "arm,mmu-500" 37 - mmu-masters : A list of phandles to device nodes representing bus 75 mmu-masters = <&dma0 0xd01d 0xd01e>,
|
| D | ti,omap-iommu.txt | 33 mmu_isp: mmu@480bd400 { 43 mmu0_dsp2: mmu@41501000 { 52 mmu1_dsp2: mmu@41502000 {
|
| D | renesas,ipmmu-vmsa.txt | 29 ipmmu_mx: mmu@fe951000 {
|
| /linux-4.4.14/drivers/gpu/drm/gma500/ |
| D | psb_drv.c | 182 if (dev_priv->mmu) { in psb_driver_unload() 188 (dev_priv->mmu), in psb_driver_unload() 192 psb_mmu_driver_takedown(dev_priv->mmu); in psb_driver_unload() 193 dev_priv->mmu = NULL; in psb_driver_unload() 328 dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0); in psb_driver_load() 329 if (!dev_priv->mmu) in psb_driver_load() 332 dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0); in psb_driver_load() 342 ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu), in psb_driver_load() 348 psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0); in psb_driver_load()
|
| D | Makefile | 15 mmu.o \
|
| D | gtt.c | 262 psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu), in psb_gtt_pin() 304 psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu), in psb_gtt_unpin()
|
| D | psb_drv.h | 468 struct psb_mmu_driver *mmu; member
|
| /linux-4.4.14/arch/m32r/mm/ |
| D | Makefile | 6 obj-y := init.o fault.o mmu.o extable.o ioremap.o cache.o page.o 8 obj-y := init.o fault-nommu.o mmu.o extable.o ioremap-nommu.o cache.o page.o
|
| /linux-4.4.14/mm/ |
| D | Makefile | 8 mmu-y := nommu.o 9 mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \ 14 mmu-$(CONFIG_MMU) += process_vm_access.o 24 debug.o $(mmu-y)
|
| /linux-4.4.14/arch/sh/mm/ |
| D | Makefile | 17 mmu-y := nommu.o extable_32.o 18 mmu-$(CONFIG_MMU) := extable_$(BITS).o fault.o gup.o ioremap.o kmap.o \ 21 obj-y += $(mmu-y)
|
| /linux-4.4.14/drivers/gpu/drm/msm/mdp/mdp4/ |
| D | mdp4_kms.c | 411 struct msm_mmu *mmu; in mdp4_kms_init() local 500 mmu = msm_iommu_new(&pdev->dev, config->iommu); in mdp4_kms_init() 501 if (IS_ERR(mmu)) { in mdp4_kms_init() 502 ret = PTR_ERR(mmu); in mdp4_kms_init() 505 ret = mmu->funcs->attach(mmu, iommu_ports, in mdp4_kms_init() 512 mmu = NULL; in mdp4_kms_init() 515 mdp4_kms->id = msm_register_mmu(dev, mmu); in mdp4_kms_init()
|
| /linux-4.4.14/Documentation/virtual/kvm/ |
| D | 00-INDEX | 13 mmu.txt 14 - the x86 kvm shadow mmu.
|
| D | locking.txt | 15 the mmu-lock on x86. Currently, the page fault can be fast only if the 22 - SPTE_MMU_WRITEABLE means the gfn is writable on mmu. The bit is set when 23 the gfn is writable on guest mmu and it is not write-protected by shadow 114 if it can be updated out of mmu-lock, see spte_has_volatile_bits(), it means, 122 As mentioned before, the spte can be updated to writable out of mmu-lock on 127 Since the spte is "volatile" if it can be updated out of mmu-lock, we always 157 Comment: it is a spinlock since it is used in mmu notifier.
|
| D | mmu.txt | 1 The x86 kvm shadow mmu 4 The mmu (in arch/x86/kvm, files mmu.[ch] and paging_tmpl.h) is responsible 5 for presenting a standard x86 mmu to the guest, while translating guest 8 The mmu code attempts to satisfy the following requirements: 11 on an emulated mmu except for timing (we attempt to comply 16 - performance: minimize the performance penalty imposed by the mmu 48 The mmu supports first-generation mmu hardware, which allows an atomic switch 51 it exposes is the traditional 2/3/4 level x86 mmu, with support for global 58 The primary job of the mmu is to program the processor's mmu to translate 72 number of required translations matches the hardware, the mmu operates in [all …]
|
| D | api.txt | 3440 addresses of mmu-type-specific data structures. The "array_len" field is an 3454 For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV:
|
| /linux-4.4.14/drivers/gpu/drm/nouveau/ |
| D | nouveau_chan.c | 93 struct nvkm_mmu *mmu = nvxx_mmu(device); in nouveau_channel_prep() local 141 args.limit = cli->vm->mmu->limit - 1; in nouveau_channel_prep() 171 args.limit = mmu->limit - 1; in nouveau_channel_prep() 297 struct nvkm_mmu *mmu = nvxx_mmu(device); in nouveau_channel_init() local 309 args.limit = cli->vm->mmu->limit - 1; in nouveau_channel_init() 326 args.limit = cli->vm->mmu->limit - 1; in nouveau_channel_init() 338 args.limit = mmu->limit - 1; in nouveau_channel_init()
|
| D | nouveau_ttm.c | 199 struct nvkm_mmu *mmu = nvxx_mmu(&drm->device); in nv04_gart_manager_init() local 200 struct nv04_mmu *priv = (void *)mmu; in nv04_gart_manager_init()
|
| D | nouveau_bo.c | 191 lpg_shift = drm->client.vm->mmu->lpg_shift; in nouveau_bo_new() 218 nvbo->page_shift = drm->client.vm->mmu->lpg_shift; in nouveau_bo_new() 1239 nvbo->page_shift != vma->vm->mmu->lpg_shift)) { in nouveau_bo_move_ntfy() 1642 nvbo->page_shift != vma->vm->mmu->lpg_shift)) in nouveau_bo_vma_add()
|
| /linux-4.4.14/arch/sparc/kernel/ |
| D | wuf.S | 265 LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %l5) ! read mmu-ctrl reg 266 SUN_PI_(lda [%g0] ASI_M_MMUREGS, %l5) ! read mmu-ctrl reg 281 LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %twin_tmp1) ! load mmu-ctrl again 282 SUN_PI_(lda [%g0] ASI_M_MMUREGS, %twin_tmp1) ! load mmu-ctrl again
|
| /linux-4.4.14/drivers/gpu/drm/msm/adreno/ |
| D | adreno_gpu.c | 336 struct msm_mmu *mmu; in adreno_gpu_init() local 375 mmu = gpu->mmu; in adreno_gpu_init() 376 if (mmu) { in adreno_gpu_init() 377 ret = mmu->funcs->attach(mmu, iommu_ports, in adreno_gpu_init()
|
| D | a4xx_gpu.c | 585 if (!gpu->mmu) { in a4xx_gpu_init()
|
| D | a3xx_gpu.c | 586 if (!gpu->mmu) { in a3xx_gpu_init()
|
| /linux-4.4.14/arch/arm/lib/ |
| D | Makefile | 18 mmu-y := clear_user.o copy_page.o getuser.o putuser.o \ 24 lib-$(CONFIG_MMU) += $(mmu-y)
|
| /linux-4.4.14/Documentation/frv/ |
| D | README.txt | 32 (*) mmu-layout.txt 41 MMU state on the FR451. See mmu-layout.txt for more information.
|
| D | configuring.txt | 102 (*) defconfig-mmu 107 (*) defconfig-mmu-audio 113 (*) defconfig-mmu-fb 119 (*) defconfig-mmu-standalone
|
| D | kernel-ABI.txt | 104 SCR0 MMU See mmu-layout.txt. 105 SCR1 MMU See mmu-layout.txt. 110 DAMR/IAMR MMU See mmu-layout.txt.
|
| D | features.txt | 59 See mmu-layout.txt in this directory for a description of the normal linux
|
| /linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ |
| D | chan.c | 356 struct nvkm_mmu *mmu = device->mmu; in nvkm_fifo_chan_ctor() local 385 if (!vm && mmu) { in nvkm_fifo_chan_ctor() 386 if (!client->vm || client->vm->mmu == mmu) { in nvkm_fifo_chan_ctor()
|
| /linux-4.4.14/arch/xtensa/mm/ |
| D | Makefile | 6 obj-$(CONFIG_MMU) += cache.o fault.o mmu.o tlb.o
|
| /linux-4.4.14/arch/nios2/kernel/ |
| D | cpuinfo.c | 75 cpuinfo.mmu = fcpu_has(cpu, "altr,has-mmu"); in setup_cpuinfo() 139 cpuinfo.mmu ? "present" : "none", in show_cpuinfo()
|
| /linux-4.4.14/arch/frv/mm/ |
| D | Makefile | 9 mmu-context.o dma-alloc.o elf-fdpic.o
|
| /linux-4.4.14/arch/c6x/kernel/ |
| D | setup.c | 78 const char *mmu; member 121 p->mmu = "none"; in get_cpuinfo() 470 p->core_id, p->mmu, p->fpu, in show_cpuinfo()
|
| /linux-4.4.14/arch/unicore32/mm/ |
| D | Makefile | 5 obj-y := extable.o fault.o init.o pgd.o mmu.o
|
| D | proc-ucv2.S | 49 movc p0.c1, ip, #0 @ disable caches and mmu
|
| /linux-4.4.14/arch/arm/boot/dts/ |
| D | dra74x.dtsi | 85 mmu0_dsp2: mmu@41501000 { 95 mmu1_dsp2: mmu@41502000 {
|
| D | r8a7794.dtsi | 1085 ipmmu_sy0: mmu@e6280000 { 1094 ipmmu_sy1: mmu@e6290000 { 1102 ipmmu_ds: mmu@e6740000 { 1110 ipmmu_mp: mmu@ec680000 { 1118 ipmmu_mx: mmu@fe951000 { 1126 ipmmu_gp: mmu@e62a0000 {
|
| D | r8a7791.dtsi | 1581 ipmmu_sy0: mmu@e6280000 { 1590 ipmmu_sy1: mmu@e6290000 { 1598 ipmmu_ds: mmu@e6740000 { 1607 ipmmu_mp: mmu@ec680000 { 1615 ipmmu_mx: mmu@fe951000 { 1624 ipmmu_rt: mmu@ffc80000 { 1632 ipmmu_gp: mmu@e62a0000 {
|
| D | r8a7790.dtsi | 1768 ipmmu_sy0: mmu@e6280000 { 1777 ipmmu_sy1: mmu@e6290000 { 1785 ipmmu_ds: mmu@e6740000 { 1794 ipmmu_mp: mmu@ec680000 { 1802 ipmmu_mx: mmu@fe951000 { 1811 ipmmu_rt: mmu@ffc80000 {
|
| D | omap3.dtsi | 459 mmu_isp: mmu@480bd400 { 468 mmu_iva: mmu@5d000000 {
|
| D | dra7.dtsi | 919 mmu0_dsp1: mmu@40d01000 { 929 mmu1_dsp1: mmu@40d02000 { 939 mmu_ipu1: mmu@58882000 { 949 mmu_ipu2: mmu@55082000 {
|
| D | omap4.dtsi | 551 mmu_dsp: mmu@4a066000 { 559 mmu_ipu: mmu@55082000 {
|
| D | omap5.dtsi | 612 mmu_dsp: mmu@4a066000 { 620 mmu_ipu: mmu@55082000 {
|
| /linux-4.4.14/arch/nios2/include/asm/ |
| D | cpuinfo.h | 28 u32 mmu; member
|
| /linux-4.4.14/arch/arm64/mm/ |
| D | Makefile | 3 ioremap.o mmap.o pgd.o mmu.o \
|
| /linux-4.4.14/Documentation/devicetree/bindings/nios2/ |
| D | nios2.txt | 27 - altr,has-mmu: Specifies CPU support MMU support, should be 1. 61 altr,has-mmu = <1>;
|
| /linux-4.4.14/arch/x86/xen/ |
| D | Makefile | 13 obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
|
| /linux-4.4.14/arch/frv/kernel/ |
| D | Makefile | 6 heads-$(CONFIG_MMU) := head-mmu-fr451.o
|
| /linux-4.4.14/arch/microblaze/boot/dts/ |
| D | system.dts | 104 xlnx,mmu-dtlb-size = <0x4>; 105 xlnx,mmu-itlb-size = <0x2>; 106 xlnx,mmu-tlb-access = <0x3>; 107 xlnx,mmu-zones = <0x10>; 128 xlnx,use-mmu = <0x3>;
|
| /linux-4.4.14/arch/arm/kvm/ |
| D | Makefile | 21 obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
|
| /linux-4.4.14/arch/microblaze/include/asm/ |
| D | cpuinfo.h | 40 u32 mmu; member
|
| /linux-4.4.14/drivers/gpu/drm/nouveau/include/nvkm/subdev/ |
| D | mmu.h | 28 struct nvkm_mmu *mmu; member
|
| /linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/ |
| D | Kbuild | 14 include $(src)/nvkm/subdev/mmu/Kbuild
|
| /linux-4.4.14/drivers/gpu/drm/nouveau/include/nvif/ |
| D | device.h | 60 #define nvxx_mmu(a) nvxx_device(a)->mmu
|
| /linux-4.4.14/arch/arm64/kvm/ |
| D | Makefile | 15 kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
|
| /linux-4.4.14/arch/mn10300/mm/ |
| D | Makefile | 28 init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
|
| /linux-4.4.14/arch/cris/include/arch-v32/arch/hwregs/ |
| D | Makefile | 58 REGDESC += $(BASEDIR)/core/cpu/mmu/doc/mmu_regs.r 121 mmu_defs_asm.h: $(BASEDIR)/core/cpu/mmu/doc/mmu_regs.r
|
| /linux-4.4.14/drivers/gpu/drm/nouveau/include/nvkm/core/ |
| D | device.h | 115 struct nvkm_mmu *mmu; member 180 int (*mmu )(struct nvkm_device *, int idx, struct nvkm_mmu **); member
|
| /linux-4.4.14/arch/microblaze/kernel/cpu/ |
| D | mb.c | 74 seq_printf(m, " MMU:\t\t%x\n", cpuinfo.mmu); in show_cpuinfo()
|
| D | cpuinfo-pvr-full.c | 73 CI(mmu, USE_MMU); in set_cpuinfo_pvr_full()
|
| D | cpuinfo-static.c | 119 ci->mmu = fcpu(cpu, "xlnx,use-mmu"); in set_cpuinfo_static()
|
| /linux-4.4.14/arch/c6x/include/asm/ |
| D | Kbuild | 31 generic-y += mmu.h
|
| /linux-4.4.14/arch/h8300/include/asm/ |
| D | Kbuild | 38 generic-y += mmu.h
|
| /linux-4.4.14/arch/powerpc/boot/dts/fsl/ |
| D | e500v2_power_isa.dtsi | 50 mmu-type = "power-embedded";
|
| D | e5500_power_isa.dtsi | 58 mmu-type = "power-embedded";
|
| D | e500mc_power_isa.dtsi | 57 mmu-type = "power-embedded";
|
| D | e6500_power_isa.dtsi | 63 mmu-type = "power-embedded";
|
| /linux-4.4.14/arch/arc/include/asm/ |
| D | arcregs.h | 348 struct cpuinfo_arc_mmu mmu; member
|
| /linux-4.4.14/arch/nios2/boot/dts/ |
| D | 3c120_devboard.dts | 54 altr,has-mmu = <1>;
|
| D | 10m50_devboard.dts | 39 altr,has-mmu = <1>;
|
| /linux-4.4.14/arch/unicore32/kernel/ |
| D | sleep.S | 176 movc p0.c1, r5, #0 @ control reg, turn on mmu
|
| /linux-4.4.14/arch/arm/kernel/ |
| D | head.S | 150 @ mmu has been enabled 399 sub lr, r4, r5 @ mmu has been enabled
|
| /linux-4.4.14/arch/arm/mm/ |
| D | Makefile | 9 mmap.o pgd.o mmu.o pageattr.o
|
| D | proc-xsc3.S | 479 orr r0, r0, r6 @ ..VI Z..S .... .C.M (mmu)
|
| /linux-4.4.14/arch/x86/include/asm/ |
| D | kvm_host.h | 419 struct kvm_mmu mmu; member 953 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3); 1047 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
|
| /linux-4.4.14/Documentation/trace/ |
| D | stm.txt | 64 mmu) will usually contain multiple channels' mmios, so the user will
|
| /linux-4.4.14/arch/arm64/boot/dts/freescale/ |
| D | fsl-ls2080a.dtsi | 203 compatible = "arm,mmu-500"; 248 mmu-masters = <&fsl_mc 0x300 0>;
|
| /linux-4.4.14/arch/blackfin/kernel/ |
| D | setup.c | 1272 char *cpu, *mmu, *fpu, *vendor, *cache; local 1280 mmu = "none";
|
| /linux-4.4.14/arch/arm64/boot/dts/xilinx/ |
| D | zynqmp.dtsi | 251 compatible = "arm,mmu-500";
|
| /linux-4.4.14/arch/powerpc/include/asm/ |
| D | kvm_host.h | 418 struct kvmppc_mmu mmu; member
|
| /linux-4.4.14/Documentation/cris/ |
| D | README | 153 mmu : yes
|
| /linux-4.4.14/arch/sparc/mm/ |
| D | init_64.c | 565 unsigned long mmu) in hypervisor_tlb_lock() argument 567 unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu); in hypervisor_tlb_lock() 571 "errors with %lx\n", vaddr, 0, pte, mmu, ret); in hypervisor_tlb_lock()
|
| /linux-4.4.14/arch/cris/arch-v10/ |
| D | README.mm | 153 [ References: asm/pgtable.h, asm/page.h, asm/mmu.h ]
|
| /linux-4.4.14/Documentation/ |
| D | 00-INDEX | 339 - documentation about no-mmu memory mapping support.
|
| /linux-4.4.14/arch/arm/boot/compressed/ |
| D | head.S | 809 orr r0, r0, #0x000d @ Write buffer, mmu
|
| /linux-4.4.14/arch/sh/kernel/cpu/sh5/ |
| D | entry.S | 615 ! the mmu back on
|