Searched refs:mmu (Results 1 - 200 of 377) sorted by relevance


/linux-4.4.14/fs/ramfs/
Makefile
7 file-mmu-y := file-nommu.o
8 file-mmu-$(CONFIG_MMU) := file-mmu.o
9 ramfs-objs += inode.o $(file-mmu-y)
file-mmu.c
1 /* file-mmu.c: ramfs MMU-based file operations
/linux-4.4.14/arch/metag/mm/
Makefile
12 mmu-y := mmu-meta1.o
13 mmu-$(CONFIG_METAG_META21_MMU) := mmu-meta2.o
14 obj-y += $(mmu-y)
mmu-meta2.c
15 #include <asm/mmu.h>
87 * Check that all the mmu table regions are priv protected, and if not repriv_mmu_tables()
145 * Now copy over any MMU pgd entries already in the mmu page tables mmu_init()
mmu-meta1.c
12 #include <asm/mmu.h>
138 * Now copy over any MMU pgd entries already in the mmu page tables mmu_init()
init.c
19 #include <asm/mmu.h>
81 * table (swapper_pg_dir). This is because with a META1 mmu we user_gateway_init()
fault.c
15 #include <asm/mmu.h>
/linux-4.4.14/arch/cris/arch-v32/mm/
init.c
16 #include <asm/mmu.h>
50 mmu_config = ( REG_STATE(mmu, rw_mm_cfg, we, on) | cris_mmu_init()
51 REG_STATE(mmu, rw_mm_cfg, acc, on) | cris_mmu_init()
52 REG_STATE(mmu, rw_mm_cfg, ex, on) | cris_mmu_init()
53 REG_STATE(mmu, rw_mm_cfg, inv, on) | cris_mmu_init()
55 REG_STATE(mmu, rw_mm_cfg, seg_f, page) | cris_mmu_init()
56 REG_STATE(mmu, rw_mm_cfg, seg_e, page) | cris_mmu_init()
57 REG_STATE(mmu, rw_mm_cfg, seg_d, linear) | cris_mmu_init()
59 REG_STATE(mmu, rw_mm_cfg, seg_f, linear) | cris_mmu_init()
60 REG_STATE(mmu, rw_mm_cfg, seg_e, linear) | cris_mmu_init()
61 REG_STATE(mmu, rw_mm_cfg, seg_d, page) | cris_mmu_init()
63 REG_STATE(mmu, rw_mm_cfg, seg_c, linear) | cris_mmu_init()
64 REG_STATE(mmu, rw_mm_cfg, seg_b, linear) | cris_mmu_init()
65 REG_STATE(mmu, rw_mm_cfg, seg_a, page) | cris_mmu_init()
66 REG_STATE(mmu, rw_mm_cfg, seg_9, page) | cris_mmu_init()
67 REG_STATE(mmu, rw_mm_cfg, seg_8, page) | cris_mmu_init()
68 REG_STATE(mmu, rw_mm_cfg, seg_7, page) | cris_mmu_init()
69 REG_STATE(mmu, rw_mm_cfg, seg_6, page) | cris_mmu_init()
70 REG_STATE(mmu, rw_mm_cfg, seg_5, page) | cris_mmu_init()
71 REG_STATE(mmu, rw_mm_cfg, seg_4, page) | cris_mmu_init()
72 REG_STATE(mmu, rw_mm_cfg, seg_3, page) | cris_mmu_init()
73 REG_STATE(mmu, rw_mm_cfg, seg_2, page) | cris_mmu_init()
74 REG_STATE(mmu, rw_mm_cfg, seg_1, page) | cris_mmu_init()
75 REG_STATE(mmu, rw_mm_cfg, seg_0, page)); cris_mmu_init()
78 mmu_kbase_hi = ( REG_FIELD(mmu, rw_mm_kbase_hi, base_f, 0x0) | cris_mmu_init()
80 REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 0x0) | cris_mmu_init()
81 REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 0x5) | cris_mmu_init()
83 REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 0x8) | cris_mmu_init()
84 REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 0x0) | cris_mmu_init()
86 REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x4) | cris_mmu_init()
87 REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb) | cris_mmu_init()
88 REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0x0) | cris_mmu_init()
89 REG_FIELD(mmu, rw_mm_kbase_hi, base_9, 0x0) | cris_mmu_init()
90 REG_FIELD(mmu, rw_mm_kbase_hi, base_8, 0x0)); cris_mmu_init()
92 mmu_kbase_lo = ( REG_FIELD(mmu, rw_mm_kbase_lo, base_7, 0x0) | cris_mmu_init()
93 REG_FIELD(mmu, rw_mm_kbase_lo, base_6, 0x0) | cris_mmu_init()
94 REG_FIELD(mmu, rw_mm_kbase_lo, base_5, 0x0) | cris_mmu_init()
95 REG_FIELD(mmu, rw_mm_kbase_lo, base_4, 0x0) | cris_mmu_init()
96 REG_FIELD(mmu, rw_mm_kbase_lo, base_3, 0x0) | cris_mmu_init()
97 REG_FIELD(mmu, rw_mm_kbase_lo, base_2, 0x0) | cris_mmu_init()
98 REG_FIELD(mmu, rw_mm_kbase_lo, base_1, 0x0) | cris_mmu_init()
99 REG_FIELD(mmu, rw_mm_kbase_lo, base_0, 0x0)); cris_mmu_init()
101 mmu_page_id = REG_FIELD(mmu, rw_mm_tlb_hi, pid, 0); cris_mmu_init()
tlb.c
19 tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, val); \
48 int mmu; __flush_tlb_all() local
59 for (mmu = 1; mmu <= 2; mmu++) { __flush_tlb_all()
60 SUPP_BANK_SEL(mmu); /* Select the MMU */ __flush_tlb_all()
63 mmu_tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, i); __flush_tlb_all()
65 mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid, INVALID_PAGEID) __flush_tlb_all()
66 | REG_FIELD(mmu, rw_mm_tlb_hi, vpn, i & 0xf)); __flush_tlb_all()
82 int mmu; __flush_tlb_mm() local
96 for (mmu = 1; mmu <= 2; mmu++) { __flush_tlb_mm()
97 SUPP_BANK_SEL(mmu); __flush_tlb_mm()
106 mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid, __flush_tlb_mm()
108 | REG_FIELD(mmu, rw_mm_tlb_hi, vpn, __flush_tlb_mm()
124 int mmu; __flush_tlb_page() local
143 for (mmu = 1; mmu <= 2; mmu++) { __flush_tlb_page()
144 SUPP_BANK_SEL(mmu); __flush_tlb_page()
152 mmu_tlb_hi = REG_FIELD(mmu, rw_mm_tlb_hi, pid, __flush_tlb_page()
mmu.S
39 .macro MMU_BUS_FAULT_HANDLER handler, mmu, we, ex
44 move \mmu, $srs ; Select MMU support register bank
82 .macro MMU_REFILL_HANDLER handler, mmu
95 move \mmu, $srs ; Select MMU support register bank
185 move \mmu, $srs
/linux-4.4.14/arch/cris/include/asm/
mmu.h
8 #include <arch/mmu.h>
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
base.c
33 struct nvkm_mmu *mmu = vm->mmu; nvkm_vm_map_at() local
35 int big = vma->node->type != mmu->func->spg_shift; nvkm_vm_map_at()
38 u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde; nvkm_vm_map_at()
39 u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits; nvkm_vm_map_at()
40 u32 max = 1 << (mmu->func->pgt_bits - bits); nvkm_vm_map_at()
56 mmu->func->map(vma, pgt, node, pte, len, phys, delta); nvkm_vm_map_at()
70 mmu->func->flush(vm); nvkm_vm_map_at()
78 struct nvkm_mmu *mmu = vm->mmu; nvkm_vm_map_sg_table() local
79 int big = vma->node->type != mmu->func->spg_shift; nvkm_vm_map_sg_table()
83 u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde; nvkm_vm_map_sg_table()
84 u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits; nvkm_vm_map_sg_table()
85 u32 max = 1 << (mmu->func->pgt_bits - bits); nvkm_vm_map_sg_table()
103 mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr); nvkm_vm_map_sg_table()
118 mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr); nvkm_vm_map_sg_table()
128 mmu->func->flush(vm); nvkm_vm_map_sg_table()
136 struct nvkm_mmu *mmu = vm->mmu; nvkm_vm_map_sg() local
138 int big = vma->node->type != mmu->func->spg_shift; nvkm_vm_map_sg()
142 u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde; nvkm_vm_map_sg()
143 u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits; nvkm_vm_map_sg()
144 u32 max = 1 << (mmu->func->pgt_bits - bits); nvkm_vm_map_sg()
155 mmu->func->map_sg(vma, pgt, mem, pte, len, list); nvkm_vm_map_sg()
166 mmu->func->flush(vm); nvkm_vm_map_sg()
185 struct nvkm_mmu *mmu = vm->mmu; nvkm_vm_unmap_at() local
186 int big = vma->node->type != mmu->func->spg_shift; nvkm_vm_unmap_at()
190 u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde; nvkm_vm_unmap_at()
191 u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits; nvkm_vm_unmap_at()
192 u32 max = 1 << (mmu->func->pgt_bits - bits); nvkm_vm_unmap_at()
203 mmu->func->unmap(vma, pgt, pte, len); nvkm_vm_unmap_at()
213 mmu->func->flush(vm); nvkm_vm_unmap_at()
225 struct nvkm_mmu *mmu = vm->mmu; nvkm_vm_unmap_pgt() local
240 mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem); nvkm_vm_unmap_pgt()
250 struct nvkm_mmu *mmu = vm->mmu; nvkm_vm_map_pgt() local
253 int big = (type != mmu->func->spg_shift); nvkm_vm_map_pgt()
257 pgt_size = (1 << (mmu->func->pgt_bits + 12)) >> type; nvkm_vm_map_pgt()
260 ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST, nvkm_vm_map_pgt()
266 mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem); nvkm_vm_map_pgt()
277 struct nvkm_mmu *mmu = vm->mmu; nvkm_vm_get() local
291 fpde = (vma->node->offset >> mmu->func->pgt_bits); nvkm_vm_get()
292 lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits; nvkm_vm_get()
296 int big = (vma->node->type != mmu->func->spg_shift); nvkm_vm_get()
324 struct nvkm_mmu *mmu; nvkm_vm_put() local
331 mmu = vm->mmu; nvkm_vm_put()
333 fpde = (vma->node->offset >> mmu->func->pgt_bits); nvkm_vm_put()
334 lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits; nvkm_vm_put()
337 nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde); nvkm_vm_put()
347 struct nvkm_mmu *mmu = vm->mmu; nvkm_vm_boot() local
351 ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST, nvkm_vm_boot()
352 (size >> mmu->func->spg_shift) * 8, 0x1000, true, &pgt); nvkm_vm_boot()
363 nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset, nvkm_vm_create() argument
377 vm->mmu = mmu; nvkm_vm_create()
379 vm->fpde = offset >> (mmu->func->pgt_bits + 12); nvkm_vm_create()
380 vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12); nvkm_vm_create()
405 struct nvkm_mmu *mmu = device->mmu; nvkm_vm_new() local
406 if (!mmu->func->create) nvkm_vm_new()
408 return mmu->func->create(mmu, offset, length, mm_offset, key, pvm); nvkm_vm_new()
414 struct nvkm_mmu *mmu = vm->mmu; nvkm_vm_link() local
429 mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem); nvkm_vm_link()
492 struct nvkm_mmu *mmu = nvkm_mmu(subdev); nvkm_mmu_oneinit() local
493 if (mmu->func->oneinit) nvkm_mmu_oneinit()
494 return mmu->func->oneinit(mmu); nvkm_mmu_oneinit()
501 struct nvkm_mmu *mmu = nvkm_mmu(subdev); nvkm_mmu_init() local
502 if (mmu->func->init) nvkm_mmu_init()
503 mmu->func->init(mmu); nvkm_mmu_init()
510 struct nvkm_mmu *mmu = nvkm_mmu(subdev); nvkm_mmu_dtor() local
511 if (mmu->func->dtor) nvkm_mmu_dtor()
512 return mmu->func->dtor(mmu); nvkm_mmu_dtor()
513 return mmu; nvkm_mmu_dtor()
525 int index, struct nvkm_mmu *mmu) nvkm_mmu_ctor()
527 nvkm_subdev_ctor(&nvkm_mmu, device, index, 0, &mmu->subdev); nvkm_mmu_ctor()
528 mmu->func = func; nvkm_mmu_ctor()
529 mmu->limit = func->limit; nvkm_mmu_ctor()
530 mmu->dma_bits = func->dma_bits; nvkm_mmu_ctor()
531 mmu->lpg_shift = func->lpg_shift; nvkm_mmu_ctor()
524 nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device, int index, struct nvkm_mmu *mmu) nvkm_mmu_ctor() argument
nv04.c
78 struct nv04_mmu *mmu = nv04_mmu(base); nv04_mmu_oneinit() local
79 struct nvkm_device *device = mmu->base.subdev.device; nv04_mmu_oneinit()
83 ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096, NULL, nv04_mmu_oneinit()
84 &mmu->vm); nv04_mmu_oneinit()
91 mmu->vm->pgt[0].mem[0] = dma; nv04_mmu_oneinit()
92 mmu->vm->pgt[0].refcount[0] = 1; nv04_mmu_oneinit()
106 struct nv04_mmu *mmu = nv04_mmu(base); nv04_mmu_dtor() local
107 struct nvkm_device *device = mmu->base.subdev.device; nv04_mmu_dtor()
108 if (mmu->vm) { nv04_mmu_dtor()
109 nvkm_memory_del(&mmu->vm->pgt[0].mem[0]); nv04_mmu_dtor()
110 nvkm_vm_ref(NULL, &mmu->vm, NULL); nv04_mmu_dtor()
112 if (mmu->nullp) { nv04_mmu_dtor()
114 mmu->nullp, mmu->null); nv04_mmu_dtor()
116 return mmu; nv04_mmu_dtor()
123 struct nv04_mmu *mmu; nv04_mmu_new_() local
124 if (!(mmu = kzalloc(sizeof(*mmu), GFP_KERNEL))) nv04_mmu_new_()
126 *pmmu = &mmu->base; nv04_mmu_new_()
127 nvkm_mmu_ctor(func, device, index, &mmu->base); nv04_mmu_new_()
nv41.c
71 struct nv04_mmu *mmu = nv04_mmu(vm->mmu); nv41_vm_flush() local
72 struct nvkm_device *device = mmu->base.subdev.device; nv41_vm_flush()
74 mutex_lock(&mmu->base.subdev.mutex); nv41_vm_flush()
81 mutex_unlock(&mmu->base.subdev.mutex); nv41_vm_flush()
91 struct nv04_mmu *mmu = nv04_mmu(base); nv41_mmu_oneinit() local
92 struct nvkm_device *device = mmu->base.subdev.device; nv41_mmu_oneinit()
95 ret = nvkm_vm_create(&mmu->base, 0, NV41_GART_SIZE, 0, 4096, NULL, nv41_mmu_oneinit()
96 &mmu->vm); nv41_mmu_oneinit()
102 &mmu->vm->pgt[0].mem[0]); nv41_mmu_oneinit()
103 mmu->vm->pgt[0].refcount[0] = 1; nv41_mmu_oneinit()
110 struct nv04_mmu *mmu = nv04_mmu(base); nv41_mmu_init() local
111 struct nvkm_device *device = mmu->base.subdev.device; nv41_mmu_init()
112 struct nvkm_memory *dma = mmu->vm->pgt[0].mem[0]; nv41_mmu_init()
nv44.c
87 struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu); nv44_vm_map_sg() local
95 nv44_vm_fill(pgt, mmu->null, list, pte, part); nv44_vm_map_sg()
112 nv44_vm_fill(pgt, mmu->null, list, pte, cnt); nv44_vm_map_sg()
119 struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu); nv44_vm_unmap() local
125 nv44_vm_fill(pgt, mmu->null, NULL, pte, part); nv44_vm_unmap()
139 nv44_vm_fill(pgt, mmu->null, NULL, pte, cnt); nv44_vm_unmap()
146 struct nv04_mmu *mmu = nv04_mmu(vm->mmu); nv44_vm_flush() local
147 struct nvkm_device *device = mmu->base.subdev.device; nv44_vm_flush()
148 nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE); nv44_vm_flush()
164 struct nv04_mmu *mmu = nv04_mmu(base); nv44_mmu_oneinit() local
165 struct nvkm_device *device = mmu->base.subdev.device; nv44_mmu_oneinit()
168 mmu->nullp = dma_alloc_coherent(device->dev, 16 * 1024, nv44_mmu_oneinit()
169 &mmu->null, GFP_KERNEL); nv44_mmu_oneinit()
170 if (!mmu->nullp) { nv44_mmu_oneinit()
171 nvkm_warn(&mmu->base.subdev, "unable to allocate dummy pages\n"); nv44_mmu_oneinit()
172 mmu->null = 0; nv44_mmu_oneinit()
175 ret = nvkm_vm_create(&mmu->base, 0, NV44_GART_SIZE, 0, 4096, NULL, nv44_mmu_oneinit()
176 &mmu->vm); nv44_mmu_oneinit()
183 &mmu->vm->pgt[0].mem[0]); nv44_mmu_oneinit()
184 mmu->vm->pgt[0].refcount[0] = 1; nv44_mmu_oneinit()
191 struct nv04_mmu *mmu = nv04_mmu(base); nv44_mmu_init() local
192 struct nvkm_device *device = mmu->base.subdev.device; nv44_mmu_init()
193 struct nvkm_memory *gart = mmu->vm->pgt[0].mem[0]; nv44_mmu_init()
204 nvkm_wr32(device, 0x100818, mmu->null); nv44_mmu_init()
gf100.c
112 struct nvkm_ltc *ltc = vma->vm->mmu->subdev.device->ltc; gf100_vm_map()
164 struct nvkm_mmu *mmu = vm->mmu; gf100_vm_flush() local
165 struct nvkm_device *device = mmu->subdev.device; gf100_vm_flush()
173 mutex_lock(&mmu->subdev.mutex); gf100_vm_flush()
192 mutex_unlock(&mmu->subdev.mutex); gf100_vm_flush()
196 gf100_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset, gf100_vm_create() argument
199 return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, key, pvm); gf100_vm_create()
nv50.c
80 struct nvkm_ram *ram = vma->vm->mmu->subdev.device->fb->ram; nv50_vm_map()
158 struct nvkm_mmu *mmu = vm->mmu; nv50_vm_flush() local
159 struct nvkm_subdev *subdev = &mmu->subdev; nv50_vm_flush()
202 nv50_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset, nv50_vm_create() argument
205 u32 block = (1 << (mmu->func->pgt_bits + 12)); nv50_vm_create()
209 return nvkm_vm_create(mmu, offset, length, mm_offset, block, key, pvm); nv50_vm_create()
priv.h
4 #include <subdev/mmu.h>
/linux-4.4.14/arch/m32r/mm/
Makefile
6 obj-y := init.o fault.o mmu.o extable.o ioremap.o cache.o page.o
8 obj-y := init.o fault-nommu.o mmu.o extable.o ioremap-nommu.o cache.o page.o
/linux-4.4.14/arch/xtensa/mm/
Makefile
6 obj-$(CONFIG_MMU) += cache.o fault.o mmu.o tlb.o
mmu.c
2 * xtensa mmu stuff
67 * Flush the mmu and reset associated register to default values.
/linux-4.4.14/arch/arm64/mm/
Makefile
3 ioremap.o mmap.o pgd.o mmu.o \
/linux-4.4.14/drivers/gpu/drm/msm/
msm_mmu.h
24 int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
25 void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
26 int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
28 int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
30 void (*destroy)(struct msm_mmu *mmu);
38 static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, msm_mmu_init() argument
41 mmu->dev = dev; msm_mmu_init()
42 mmu->funcs = funcs; msm_mmu_init()
msm_iommu.c
34 static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt) msm_iommu_attach() argument
36 struct msm_iommu *iommu = to_msm_iommu(mmu); msm_iommu_attach()
37 return iommu_attach_device(iommu->domain, mmu->dev); msm_iommu_attach()
40 static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt) msm_iommu_detach() argument
42 struct msm_iommu *iommu = to_msm_iommu(mmu); msm_iommu_detach()
43 iommu_detach_device(iommu->domain, mmu->dev); msm_iommu_detach()
46 static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova, msm_iommu_map() argument
49 struct msm_iommu *iommu = to_msm_iommu(mmu); msm_iommu_map()
85 static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova, msm_iommu_unmap() argument
88 struct msm_iommu *iommu = to_msm_iommu(mmu); msm_iommu_unmap()
112 static void msm_iommu_destroy(struct msm_mmu *mmu) msm_iommu_destroy() argument
114 struct msm_iommu *iommu = to_msm_iommu(mmu); msm_iommu_destroy()
msm_gpu.c
653 gpu->mmu = msm_iommu_new(&pdev->dev, iommu); msm_gpu_init()
654 if (IS_ERR(gpu->mmu)) { msm_gpu_init()
655 ret = PTR_ERR(gpu->mmu); msm_gpu_init()
657 gpu->mmu = NULL; msm_gpu_init()
665 gpu->id = msm_register_mmu(drm, gpu->mmu); msm_gpu_init()
701 if (gpu->mmu) msm_gpu_cleanup()
702 gpu->mmu->funcs->destroy(gpu->mmu); msm_gpu_cleanup()
msm_gem.c
297 struct msm_mmu *mmu = priv->mmus[id]; msm_gem_get_iova_locked() local
300 if (WARN_ON(!mmu)) msm_gem_get_iova_locked()
304 ret = mmu->funcs->map(mmu, offset, msm_obj->sgt, msm_gem_get_iova_locked()
525 struct msm_mmu *mmu = priv->mmus[id]; msm_gem_free_object() local
526 if (mmu && msm_obj->domain[id].iova) { msm_gem_free_object()
528 mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size); msm_gem_free_object()
msm_gpu.h
98 struct msm_mmu *mmu; member in struct:msm_gpu
/linux-4.4.14/arch/cris/arch-v32/kernel/
head.S
62 move.d REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 8) \
63 | REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 4) \
64 | REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 5) \
65 | REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb), $r0
67 move.d REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 8) \
68 | REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 4) \
69 | REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb), $r0
73 move.d REG_FIELD(mmu, rw_mm_kbase_lo, base_4, 4) \
74 | REG_FIELD(mmu, rw_mm_kbase_lo, base_0, 0), $r1
90 move.d REG_STATE(mmu, rw_mm_cfg, we, on) \
91 | REG_STATE(mmu, rw_mm_cfg, acc, on) \
92 | REG_STATE(mmu, rw_mm_cfg, ex, on) \
93 | REG_STATE(mmu, rw_mm_cfg, inv, on) \
94 | REG_STATE(mmu, rw_mm_cfg, seg_f, page) \
95 | REG_STATE(mmu, rw_mm_cfg, seg_e, page) \
96 | REG_STATE(mmu, rw_mm_cfg, seg_d, linear) \
97 | REG_STATE(mmu, rw_mm_cfg, seg_c, linear) \
98 | REG_STATE(mmu, rw_mm_cfg, seg_b, linear) \
99 | REG_STATE(mmu, rw_mm_cfg, seg_a, page) \
100 | REG_STATE(mmu, rw_mm_cfg, seg_9, page) \
101 | REG_STATE(mmu, rw_mm_cfg, seg_8, page) \
102 | REG_STATE(mmu, rw_mm_cfg, seg_7, page) \
103 | REG_STATE(mmu, rw_mm_cfg, seg_6, page) \
104 | REG_STATE(mmu, rw_mm_cfg, seg_5, page) \
105 | REG_STATE(mmu, rw_mm_cfg, seg_4, linear) \
106 | REG_STATE(mmu, rw_mm_cfg, seg_3, page) \
107 | REG_STATE(mmu, rw_mm_cfg, seg_2, page) \
108 | REG_STATE(mmu, rw_mm_cfg, seg_1, page) \
109 | REG_STATE(mmu, rw_mm_cfg, seg_0, linear), $r2
111 move.d REG_STATE(mmu, rw_mm_cfg, we, on) \
112 | REG_STATE(mmu, rw_mm_cfg, acc, on) \
113 | REG_STATE(mmu, rw_mm_cfg, ex, on) \
114 | REG_STATE(mmu, rw_mm_cfg, inv, on) \
115 | REG_STATE(mmu, rw_mm_cfg, seg_f, linear) \
116 | REG_STATE(mmu, rw_mm_cfg, seg_e, linear) \
117 | REG_STATE(mmu, rw_mm_cfg, seg_d, page) \
118 | REG_STATE(mmu, rw_mm_cfg, seg_c, linear) \
119 | REG_STATE(mmu, rw_mm_cfg, seg_b, linear) \
120 | REG_STATE(mmu, rw_mm_cfg, seg_a, page) \
121 | REG_STATE(mmu, rw_mm_cfg, seg_9, page) \
122 | REG_STATE(mmu, rw_mm_cfg, seg_8, page) \
123 | REG_STATE(mmu, rw_mm_cfg, seg_7, page) \
124 | REG_STATE(mmu, rw_mm_cfg, seg_6, page) \
125 | REG_STATE(mmu, rw_mm_cfg, seg_5, page) \
126 | REG_STATE(mmu, rw_mm_cfg, seg_4, linear) \
127 | REG_STATE(mmu, rw_mm_cfg, seg_3, page) \
128 | REG_STATE(mmu, rw_mm_cfg, seg_2, page) \
129 | REG_STATE(mmu, rw_mm_cfg, seg_1, page) \
130 | REG_STATE(mmu, rw_mm_cfg, seg_0, linear), $r2
setup.c
82 "mmu\t\t: %s\n" show_cpuinfo()
83 "mmu DMA bug\t: %s\n" show_cpuinfo()
/linux-4.4.14/arch/s390/include/asm/
mmu.h
17 /* The mmu context allocates 4K page tables. */
19 /* The mmu context uses extended page tables. */
21 /* The mmu context uses storage keys. */
/linux-4.4.14/drivers/iommu/
ipmmu-vmsa.c
40 struct ipmmu_vmsa_device *mmu; member in struct:ipmmu_vmsa_domain
51 struct ipmmu_vmsa_device *mmu; member in struct:ipmmu_vmsa_archdata
188 static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset) ipmmu_read() argument
190 return ioread32(mmu->base + offset); ipmmu_read()
193 static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset, ipmmu_write() argument
196 iowrite32(data, mmu->base + offset); ipmmu_write()
201 return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg); ipmmu_ctx_read()
207 ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data); ipmmu_ctx_write()
222 dev_err_ratelimited(domain->mmu->dev, ipmmu_tlb_sync()
247 struct ipmmu_vmsa_device *mmu = domain->mmu; ipmmu_utlb_enable() local
255 ipmmu_write(mmu, IMUASID(utlb), 0); ipmmu_utlb_enable()
257 ipmmu_write(mmu, IMUCTR(utlb), ipmmu_utlb_enable()
268 struct ipmmu_vmsa_device *mmu = domain->mmu; ipmmu_utlb_disable() local
270 ipmmu_write(mmu, IMUCTR(utlb), 0); ipmmu_utlb_disable()
320 domain->cfg.iommu_dev = domain->mmu->dev; ipmmu_domain_init_context()
392 struct ipmmu_vmsa_device *mmu = domain->mmu; ipmmu_domain_irq() local
412 dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n", ipmmu_domain_irq()
415 dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n", ipmmu_domain_irq()
427 if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0)) ipmmu_domain_irq()
430 dev_err_ratelimited(mmu->dev, ipmmu_domain_irq()
439 struct ipmmu_vmsa_device *mmu = dev; ipmmu_irq() local
443 if (!mmu->mapping) ipmmu_irq()
446 io_domain = mmu->mapping->domain; ipmmu_irq()
489 struct ipmmu_vmsa_device *mmu = archdata->mmu; ipmmu_attach_device() local
495 if (!mmu) { ipmmu_attach_device()
502 if (!domain->mmu) { ipmmu_attach_device()
504 domain->mmu = mmu; ipmmu_attach_device()
506 } else if (domain->mmu != mmu) { ipmmu_attach_device()
512 dev_name(mmu->dev), dev_name(domain->mmu->dev)); ipmmu_attach_device()
571 static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev, ipmmu_find_utlbs() argument
587 if (args.np != mmu->dev->of_node || args.args_count != 1) ipmmu_find_utlbs()
599 struct ipmmu_vmsa_device *mmu; ipmmu_add_device() local
625 list_for_each_entry(mmu, &ipmmu_devices, list) { ipmmu_add_device()
626 ret = ipmmu_find_utlbs(mmu, dev, utlbs, num_utlbs); ipmmu_add_device()
642 if (utlbs[i] >= mmu->num_utlbs) { ipmmu_add_device()
671 archdata->mmu = mmu; ipmmu_add_device()
685 if (!mmu->mapping) { ipmmu_add_device()
691 dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n"); ipmmu_add_device()
696 mmu->mapping = mapping; ipmmu_add_device()
700 ret = arm_iommu_attach_device(dev, mmu->mapping); ipmmu_add_device()
709 arm_iommu_release_mapping(mmu->mapping); ipmmu_add_device()
753 static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu) ipmmu_device_reset() argument
759 ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0); ipmmu_device_reset()
764 struct ipmmu_vmsa_device *mmu; ipmmu_probe() local
774 mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL); ipmmu_probe()
775 if (!mmu) { ipmmu_probe()
780 mmu->dev = &pdev->dev; ipmmu_probe()
781 mmu->num_utlbs = 32; ipmmu_probe()
785 mmu->base = devm_ioremap_resource(&pdev->dev, res); ipmmu_probe()
786 if (IS_ERR(mmu->base)) ipmmu_probe()
787 return PTR_ERR(mmu->base); ipmmu_probe()
801 mmu->base += IM_NS_ALIAS_OFFSET; ipmmu_probe()
810 dev_name(&pdev->dev), mmu); ipmmu_probe()
816 ipmmu_device_reset(mmu); ipmmu_probe()
825 list_add(&mmu->list, &ipmmu_devices); ipmmu_probe()
828 platform_set_drvdata(pdev, mmu); ipmmu_probe()
835 struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev); ipmmu_remove() local
838 list_del(&mmu->list); ipmmu_remove()
841 arm_iommu_release_mapping(mmu->mapping); ipmmu_remove()
843 ipmmu_device_reset(mmu); ipmmu_remove()
/linux-4.4.14/arch/unicore32/mm/
Makefile
5 obj-y := extable.o fault.o init.o pgd.o mmu.o
mmu.c
2 * linux/arch/unicore32/mm/mmu.c
446 * results when turning the mmu off
/linux-4.4.14/include/asm-generic/
mmu.h
5 * This is the mmu.h header for nommu implementations.
/linux-4.4.14/arch/frv/mm/
Makefile
9 mmu-context.o dma-alloc.o elf-fdpic.o
dma-alloc.c
46 #include <asm/mmu.h>
mmu-context.c
0 /* mmu-context.c: MMU context allocation and management
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/fifo/
gf100.h
6 #include <subdev/mmu.h>
chan.c
29 #include <subdev/mmu.h>
356 struct nvkm_mmu *mmu = device->mmu; nvkm_fifo_chan_ctor() local
385 if (!vm && mmu) { nvkm_fifo_chan_ctor()
386 if (!client->vm || client->vm->mmu == mmu) { nvkm_fifo_chan_ctor()
gk104.h
6 #include <subdev/mmu.h>
chang84.c
28 #include <subdev/mmu.h>
channv50.c
28 #include <subdev/mmu.h>
/linux-4.4.14/arch/um/kernel/skas/
Makefile
6 obj-y := clone.o mmu.o process.o syscall.o uaccess.o
mmu.c
141 struct mm_context *mmu = &mm->context; destroy_context() local
149 if (mmu->id.u.pid < 2) { destroy_context()
151 mmu->id.u.pid); destroy_context()
154 os_kill_ptraced_process(mmu->id.u.pid, 1); destroy_context()
156 free_page(mmu->id.stack); destroy_context()
157 free_ldt(mmu); destroy_context()
/linux-4.4.14/arch/unicore32/include/asm/
mmu.h
2 * linux/arch/unicore32/include/asm/mmu.h
/linux-4.4.14/arch/x86/kvm/
Makefile
14 kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
mmu.c
22 #include "mmu.h"
487 * out of mmu-lock, it can ensure dirty bit is not lost, spte_has_volatile_bits()
556 * For the spte updated out of mmu-lock is safe, since mmu_spte_update()
605 * kvm mmu, before reclaiming the page, we should mmu_spte_clear_track_bits()
606 * unmap it from mmu first. mmu_spte_clear_track_bits()
1907 if (vcpu->arch.mmu.sync_page(vcpu, sp)) { __kvm_sync_page()
1956 (vcpu->arch.mmu.sync_page(vcpu, s))) { kvm_sync_pages()
2094 role = vcpu->arch.mmu.base_role; kvm_mmu_get_page()
2100 if (!vcpu->arch.mmu.direct_map kvm_mmu_get_page()
2101 && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { kvm_mmu_get_page()
2156 iterator->shadow_addr = vcpu->arch.mmu.root_hpa; shadow_walk_init()
2157 iterator->level = vcpu->arch.mmu.shadow_root_level; shadow_walk_init()
2160 vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL && shadow_walk_init()
2161 !vcpu->arch.mmu.direct_map) shadow_walk_init()
2166 = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; shadow_walk_init()
2391 * Changing the number of mmu pages allocated to the vm
2401 /* Need to free some mmu pages to achieve the goal. */ kvm_mmu_change_mmu_pages()
2527 * mapping_level() and acquiring mmu-lock. We can set_spte()
2720 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) __direct_map()
2853 * W bit of the spte which can be done out of mmu-lock. page_fault_can_be_fast()
2907 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) fast_page_fault()
3041 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) mmu_free_roots()
3044 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL && mmu_free_roots()
3045 (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL || mmu_free_roots()
3046 vcpu->arch.mmu.direct_map)) { mmu_free_roots()
3047 hpa_t root = vcpu->arch.mmu.root_hpa; mmu_free_roots()
3057 vcpu->arch.mmu.root_hpa = INVALID_PAGE; mmu_free_roots()
3063 hpa_t root = vcpu->arch.mmu.pae_root[i]; mmu_free_roots()
3073 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; mmu_free_roots()
3077 vcpu->arch.mmu.root_hpa = INVALID_PAGE; mmu_free_roots()
3097 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { mmu_alloc_direct_roots()
3104 vcpu->arch.mmu.root_hpa = __pa(sp->spt); mmu_alloc_direct_roots()
3105 } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) { mmu_alloc_direct_roots()
3107 hpa_t root = vcpu->arch.mmu.pae_root[i]; mmu_alloc_direct_roots()
3119 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK; mmu_alloc_direct_roots()
3121 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); mmu_alloc_direct_roots()
3135 root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT; mmu_alloc_shadow_roots()
3144 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { mmu_alloc_shadow_roots()
3145 hpa_t root = vcpu->arch.mmu.root_hpa; mmu_alloc_shadow_roots()
3156 vcpu->arch.mmu.root_hpa = root; mmu_alloc_shadow_roots()
3166 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) mmu_alloc_shadow_roots()
3170 hpa_t root = vcpu->arch.mmu.pae_root[i]; mmu_alloc_shadow_roots()
3173 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) { mmu_alloc_shadow_roots()
3174 pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i); mmu_alloc_shadow_roots()
3176 vcpu->arch.mmu.pae_root[i] = 0; mmu_alloc_shadow_roots()
3192 vcpu->arch.mmu.pae_root[i] = root | pm_mask; mmu_alloc_shadow_roots()
3194 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); mmu_alloc_shadow_roots()
3200 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { mmu_alloc_shadow_roots()
3201 if (vcpu->arch.mmu.lm_root == NULL) { mmu_alloc_shadow_roots()
3213 lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask; mmu_alloc_shadow_roots()
3215 vcpu->arch.mmu.lm_root = lm_root; mmu_alloc_shadow_roots()
3218 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root); mmu_alloc_shadow_roots()
3226 if (vcpu->arch.mmu.direct_map) mmu_alloc_roots()
3237 if (vcpu->arch.mmu.direct_map) mmu_sync_roots()
3240 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) mmu_sync_roots()
3245 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { mmu_sync_roots()
3246 hpa_t root = vcpu->arch.mmu.root_hpa; mmu_sync_roots()
3253 hpa_t root = vcpu->arch.mmu.pae_root[i]; mmu_sync_roots()
3298 static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level) is_rsvd_bits_set() argument
3300 return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level); is_rsvd_bits_set()
3303 static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level) is_shadow_zero_bits_set() argument
3305 return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level); is_shadow_zero_bits_set()
3325 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) walk_shadow_page_get_mmio_spte()
3342 reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte, walk_shadow_page_get_mmio_spte()
3416 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); nonpaging_page_fault()
3430 arch.direct_map = vcpu->arch.mmu.direct_map; kvm_arch_setup_async_pf()
3431 arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu); kvm_arch_setup_async_pf()
3493 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); tdp_page_fault()
3574 vcpu->arch.mmu.inject_page_fault(vcpu, fault); inject_page_fault()
3594 static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte) is_last_gpte() argument
3600 return mmu->last_pte_bitmap & (1 << index); is_last_gpte()
3751 * table in guest or amd nested guest, its mmu features completely
3778 * the direct page table on host, use as much mmu features as
3810 struct kvm_mmu *mmu, bool ept) update_permission_bitmask()
3818 for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) { update_permission_bitmask()
3837 x |= !mmu->nx; update_permission_bitmask()
3870 mmu->permissions[byte] = map; update_permission_bitmask()
3874 static void update_last_pte_bitmap(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) update_last_pte_bitmap() argument
3877 unsigned level, root_level = mmu->root_level; update_last_pte_bitmap()
3886 && (mmu->root_level >= PT32E_ROOT_LEVEL || is_pse(vcpu))) update_last_pte_bitmap()
3889 mmu->last_pte_bitmap = map; update_last_pte_bitmap()
3948 struct kvm_mmu *context = &vcpu->arch.mmu; init_kvm_tdp_mmu()
3994 struct kvm_mmu *context = &vcpu->arch.mmu; kvm_init_shadow_mmu()
4021 struct kvm_mmu *context = &vcpu->arch.mmu; kvm_init_shadow_ept_mmu()
4045 struct kvm_mmu *context = &vcpu->arch.mmu; init_kvm_softmmu()
4063 * Note that arch.mmu.gva_to_gpa translates l2_gva to l1_gpa. The init_kvm_nested_mmu()
4066 * functions between mmu and nested_mmu are swapped. init_kvm_nested_mmu()
4122 vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa); kvm_mmu_load()
4131 WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); kvm_mmu_unload()
4145 vcpu->arch.mmu.update_pte(vcpu, sp, spte, new); mmu_pte_write_new_pte()
4344 !((sp->role.word ^ vcpu->arch.mmu.base_role.word) kvm_mmu_pte_write()
4363 if (vcpu->arch.mmu.direct_map) kvm_mmu_unprotect_page_virt()
4392 if (vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu)) is_mmio_page_fault()
4404 r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false); kvm_mmu_page_fault()
4436 vcpu->arch.mmu.invlpg(vcpu, gva); kvm_mmu_invlpg()
4456 free_page((unsigned long)vcpu->arch.mmu.pae_root); free_mmu_pages()
4457 if (vcpu->arch.mmu.lm_root != NULL) free_mmu_pages()
4458 free_page((unsigned long)vcpu->arch.mmu.lm_root); free_mmu_pages()
4475 vcpu->arch.mmu.pae_root = page_address(page); alloc_mmu_pages()
4477 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; alloc_mmu_pages()
4484 vcpu->arch.walk_mmu = &vcpu->arch.mmu; kvm_mmu_create()
4485 vcpu->arch.mmu.root_hpa = INVALID_PAGE; kvm_mmu_create()
4486 vcpu->arch.mmu.translate_gpa = translate_gpa; kvm_mmu_create()
4494 MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); kvm_mmu_setup()
4502 /* The caller should hold mmu-lock before calling this function. */
4611 * which do tlb flush out of mmu-lock should be serialized by kvm_mmu_slot_remove_write_access()
4617 * We can flush all the TLBs out of the mmu lock without TLB kvm_mmu_slot_remove_write_access()
4686 * It's also safe to flush TLBs out of mmu lock here as currently this kvm_mmu_slot_leaf_clear_dirty()
4688 * out of mmu lock also guarantees no dirty pages will be lost in kvm_mmu_slot_leaf_clear_dirty()
4803 * mmu-lock, otherwise, vcpu would purge shadow page kvm_mmu_invalidate_zap_all_pages()
4937 * Caculate mmu pages needed for kvm.
3809 update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, bool ept) update_permission_bitmask() argument
mmu.h
89 if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE)) kvm_mmu_reload()
108 * mmu-lock. And the another case does not need to flush tlb until returning
111 * missed, so it can flush tlb out of mmu-lock.
126 * writable on the mmu mapping, check SPTE_MMU_WRITEABLE, this is the most
147 static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, permission_fault() argument
172 return (mmu->permissions[index] >> pte_access) & 1; permission_fault()
paging_tmpl.h
22 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
140 static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, cmpxchg_gpte() argument
167 if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) prefetch_invalid_gpte()
200 struct kvm_mmu *mmu, update_accessed_dirty_bits()
247 ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte); update_accessed_dirty_bits()
261 struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, walk_addr_generic()
280 walker->level = mmu->root_level; walk_addr_generic()
281 pte = mmu->get_cr3(vcpu); walk_addr_generic()
285 pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3); walk_addr_generic()
314 real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn), walk_addr_generic()
348 if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) { walk_addr_generic()
357 } while (!is_last_gpte(mmu, walker->level, pte)); walk_addr_generic()
359 if (unlikely(permission_fault(vcpu, mmu, pte_access, access))) { walk_addr_generic()
370 real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault); walk_addr_generic()
388 ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault); walk_addr_generic()
403 if (fetch_fault && (mmu->nx || walk_addr_generic()
430 walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu; walk_addr_generic()
439 return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr, walk_addr()
563 top_level = vcpu->arch.mmu.root_level; fetch()
575 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) fetch()
834 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) { invlpg()
199 update_accessed_dirty_bits(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, struct guest_walker *walker, int write_fault) update_accessed_dirty_bits() argument
260 walk_addr_generic(struct guest_walker *walker, struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gva_t addr, u32 access) walk_addr_generic() argument
mmu_audit.c
62 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) mmu_spte_walk()
65 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { mmu_spte_walk()
66 hpa_t root = vcpu->arch.mmu.root_hpa; mmu_spte_walk()
74 hpa_t root = vcpu->arch.mmu.pae_root[i]; mmu_spte_walk()
125 "ent %llxn", vcpu->arch.mmu.root_level, pfn, audit_mappings()
mmutrace.h
297 TP_printk("kvm-mmu-valid-gen %lx used_pages %x",
/linux-4.4.14/arch/xtensa/include/asm/
mmu.h
13 #include <asm-generic/mmu.h>
platform.h
20 * platform_init is called before the mmu is initialized to give the
/linux-4.4.14/mm/
Makefile
8 mmu-y := nommu.o
9 mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \
14 mmu-$(CONFIG_MMU) += process_vm_access.o
24 debug.o $(mmu-y)
init-mm.c
10 #include <asm/mmu.h>
mmu_notifier.c
47 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
52 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
285 * We can't race against any other mmu notifier method either do_mmu_notifier_register()
342 * running mmu notifiers with SRCU and against mmu_notifier_unregister
/linux-4.4.14/arch/mn10300/include/asm/
mmu.h
5 * - Derived from include/asm-frv/mmu.h
/linux-4.4.14/arch/cris/include/arch-v32/arch/hwregs/asm/
mmu_defs_asm.h
6 * file: ../../inst/mmu/doc/mmu_regs.r
10 * by /n/asic/design/tools/rdesc/src/rdes2c -asm --outfile asm/mmu_defs_asm.h ../../inst/mmu/doc/mmu_regs.r
56 /* Register rw_mm_cfg, scope mmu, type rw */
119 /* Register rw_mm_kbase_lo, scope mmu, type rw */
138 /* Register rw_mm_kbase_hi, scope mmu, type rw */
157 /* Register r_mm_cause, scope mmu, type r */
166 /* Register rw_mm_tlb_sel, scope mmu, type rw */
173 /* Register rw_mm_tlb_lo, scope mmu, type rw */
193 /* Register rw_mm_tlb_hi, scope mmu, type rw */
/linux-4.4.14/arch/arm/lib/
Makefile
18 mmu-y := clear_user.o copy_page.o getuser.o putuser.o \
24 lib-$(CONFIG_MMU) += $(mmu-y)
/linux-4.4.14/arch/x86/include/asm/
mmu.h
8 * The x86 doesn't have a mmu context, but
pgtable_32.h
10 * i386 mmu expects.
kvm_host.h
183 * We don't want allocation failures within the mmu code, so we preallocate
242 * Used out of the mmu-lock to avoid reading spte values while an
266 * 32-bit). The kvm_mmu structure abstracts the details of the current mmu
419 struct kvm_mmu mmu; member in struct:kvm_vcpu_arch
432 * Pointer to the mmu context currently used for
953 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
1047 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
acpi.h
31 #include <asm/mmu.h>
/linux-4.4.14/arch/x86/xen/
Makefile
13 obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
suspend.c
14 #include "mmu.h"
/linux-4.4.14/arch/frv/kernel/
Makefile
6 heads-$(CONFIG_MMU) := head-mmu-fr451.o
head-mmu-fr451.S
1 /* head-mmu-fr451.S: FR451 mmu-linux specific bits of initialisation
/linux-4.4.14/arch/arc/mm/
tlb.c
59 #include <asm/mmu.h>
251 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; local_flush_tlb_all() local
254 int num_tlb = mmu->sets * mmu->ways; local_flush_tlb_all()
727 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; read_decode_mmu_bcr() local
759 mmu->ver = (tmp >> 24); read_decode_mmu_bcr()
761 if (mmu->ver <= 2) { read_decode_mmu_bcr()
763 mmu->pg_sz_k = TO_KB(0x2000); read_decode_mmu_bcr()
764 mmu->sets = 1 << mmu2->sets; read_decode_mmu_bcr()
765 mmu->ways = 1 << mmu2->ways; read_decode_mmu_bcr()
766 mmu->u_dtlb = mmu2->u_dtlb; read_decode_mmu_bcr()
767 mmu->u_itlb = mmu2->u_itlb; read_decode_mmu_bcr()
768 } else if (mmu->ver == 3) { read_decode_mmu_bcr()
770 mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1); read_decode_mmu_bcr()
771 mmu->sets = 1 << mmu3->sets; read_decode_mmu_bcr()
772 mmu->ways = 1 << mmu3->ways; read_decode_mmu_bcr()
773 mmu->u_dtlb = mmu3->u_dtlb; read_decode_mmu_bcr()
774 mmu->u_itlb = mmu3->u_itlb; read_decode_mmu_bcr()
775 mmu->sasid = mmu3->sasid; read_decode_mmu_bcr()
778 mmu->pg_sz_k = 1 << (mmu4->sz0 - 1); read_decode_mmu_bcr()
779 mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11); read_decode_mmu_bcr()
780 mmu->sets = 64 << mmu4->n_entry; read_decode_mmu_bcr()
781 mmu->ways = mmu4->n_ways * 2; read_decode_mmu_bcr()
782 mmu->u_dtlb = mmu4->u_dtlb * 4; read_decode_mmu_bcr()
783 mmu->u_itlb = mmu4->u_itlb * 4; read_decode_mmu_bcr()
784 mmu->sasid = mmu4->sasid; read_decode_mmu_bcr()
785 mmu->pae = mmu4->pae; read_decode_mmu_bcr()
792 struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu; arc_mmu_mumbojumbo()
813 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; arc_mmu_init() local
825 if (mmu->ver != CONFIG_ARC_MMU_VER) { arc_mmu_init()
827 mmu->ver, CONFIG_ARC_MMU_VER); arc_mmu_init()
830 if (mmu->pg_sz_k != TO_KB(PAGE_SIZE)) arc_mmu_init()
834 mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE)) arc_mmu_init()
838 if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae) arc_mmu_init()
866 #define SET_WAY_TO_IDX(mmu, set, way) ((set) * mmu->ways + (way))
881 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; do_tlb_overlap_fault() local
882 unsigned int pd0[mmu->ways]; do_tlb_overlap_fault()
892 for (set = 0; set < mmu->sets; set++) { do_tlb_overlap_fault()
897 for (way = 0, is_valid = 0; way < mmu->ways; way++) { do_tlb_overlap_fault()
899 SET_WAY_TO_IDX(mmu, set, way)); do_tlb_overlap_fault()
911 for (way = 0; way < mmu->ways - 1; way++) { do_tlb_overlap_fault()
918 for (n = way + 1; n < mmu->ways; n++) { do_tlb_overlap_fault()
932 SET_WAY_TO_IDX(mmu, set, way)); do_tlb_overlap_fault()
tlbex.S
40 #include <asm/mmu.h>
253 ; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu
/linux-4.4.14/arch/powerpc/kvm/
book3s_32_mmu.c
414 struct kvmppc_mmu *mmu = &vcpu->arch.mmu; kvmppc_mmu_book3s_32_init() local
416 mmu->mtsrin = kvmppc_mmu_book3s_32_mtsrin; kvmppc_mmu_book3s_32_init()
417 mmu->mfsrin = kvmppc_mmu_book3s_32_mfsrin; kvmppc_mmu_book3s_32_init()
418 mmu->xlate = kvmppc_mmu_book3s_32_xlate; kvmppc_mmu_book3s_32_init()
419 mmu->reset_msr = kvmppc_mmu_book3s_32_reset_msr; kvmppc_mmu_book3s_32_init()
420 mmu->tlbie = kvmppc_mmu_book3s_32_tlbie; kvmppc_mmu_book3s_32_init()
421 mmu->esid_to_vsid = kvmppc_mmu_book3s_32_esid_to_vsid; kvmppc_mmu_book3s_32_init()
422 mmu->ea_to_vp = kvmppc_mmu_book3s_32_ea_to_vp; kvmppc_mmu_book3s_32_init()
423 mmu->is_dcbz32 = kvmppc_mmu_book3s_32_is_dcbz32; kvmppc_mmu_book3s_32_init()
425 mmu->slbmte = NULL; kvmppc_mmu_book3s_32_init()
426 mmu->slbmfee = NULL; kvmppc_mmu_book3s_32_init()
427 mmu->slbmfev = NULL; kvmppc_mmu_book3s_32_init()
428 mmu->slbie = NULL; kvmppc_mmu_book3s_32_init()
429 mmu->slbia = NULL; kvmppc_mmu_book3s_32_init()
book3s_emulate.c
156 if (vcpu->arch.mmu.mfsrin) { kvmppc_core_emulate_op_pr()
158 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); kvmppc_core_emulate_op_pr()
168 if (vcpu->arch.mmu.mfsrin) { kvmppc_core_emulate_op_pr()
170 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); kvmppc_core_emulate_op_pr()
176 vcpu->arch.mmu.mtsrin(vcpu, kvmppc_core_emulate_op_pr()
181 vcpu->arch.mmu.mtsrin(vcpu, kvmppc_core_emulate_op_pr()
190 vcpu->arch.mmu.tlbie(vcpu, addr, large); kvmppc_core_emulate_op_pr()
224 if (!vcpu->arch.mmu.slbmte) kvmppc_core_emulate_op_pr()
227 vcpu->arch.mmu.slbmte(vcpu, kvmppc_core_emulate_op_pr()
232 if (!vcpu->arch.mmu.slbie) kvmppc_core_emulate_op_pr()
235 vcpu->arch.mmu.slbie(vcpu, kvmppc_core_emulate_op_pr()
239 if (!vcpu->arch.mmu.slbia) kvmppc_core_emulate_op_pr()
242 vcpu->arch.mmu.slbia(vcpu); kvmppc_core_emulate_op_pr()
245 if (!vcpu->arch.mmu.slbmfee) { kvmppc_core_emulate_op_pr()
251 t = vcpu->arch.mmu.slbmfee(vcpu, rb_val); kvmppc_core_emulate_op_pr()
256 if (!vcpu->arch.mmu.slbmfev) { kvmppc_core_emulate_op_pr()
262 t = vcpu->arch.mmu.slbmfev(vcpu, rb_val); kvmppc_core_emulate_op_pr()
439 if (vcpu->arch.mmu.is_dcbz32(vcpu) && kvmppc_core_emulate_mtspr_pr()
book3s_64_mmu.c
29 #include <asm/mmu-hash64.h>
658 struct kvmppc_mmu *mmu = &vcpu->arch.mmu; kvmppc_mmu_book3s_64_init() local
660 mmu->mfsrin = NULL; kvmppc_mmu_book3s_64_init()
661 mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin; kvmppc_mmu_book3s_64_init()
662 mmu->slbmte = kvmppc_mmu_book3s_64_slbmte; kvmppc_mmu_book3s_64_init()
663 mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee; kvmppc_mmu_book3s_64_init()
664 mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev; kvmppc_mmu_book3s_64_init()
665 mmu->slbie = kvmppc_mmu_book3s_64_slbie; kvmppc_mmu_book3s_64_init()
666 mmu->slbia = kvmppc_mmu_book3s_64_slbia; kvmppc_mmu_book3s_64_init()
667 mmu->xlate = kvmppc_mmu_book3s_64_xlate; kvmppc_mmu_book3s_64_init()
668 mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr; kvmppc_mmu_book3s_64_init()
669 mmu->tlbie = kvmppc_mmu_book3s_64_tlbie; kvmppc_mmu_book3s_64_init()
670 mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid; kvmppc_mmu_book3s_64_init()
671 mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp; kvmppc_mmu_book3s_64_init()
672 mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32; kvmppc_mmu_book3s_64_init()
book3s_64_mmu_host.c
26 #include <asm/mmu-hash64.h>
118 vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); kvmppc_mmu_map_page()
222 vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid); kvmppc_mmu_unmap_page()
322 if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { kvmppc_mmu_map_segment()
book3s_pr_papr.c
109 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); kvmppc_h_pr_remove()
191 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); kvmppc_h_pr_bulk_remove()
234 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); kvmppc_h_pr_protect()
book3s_32_mmu_host.c
25 #include <asm/mmu-hash32.h>
170 vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); kvmppc_mmu_map_page()
323 if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { kvmppc_mmu_map_segment()
book3s_64_vio.c
33 #include <asm/mmu-hash64.h>
book3s_64_vio_hv.c
32 #include <asm/mmu-hash64.h>
book3s_rmhandlers.S
23 #include <asm/mmu.h>
fpu.S
15 #include <asm/mmu.h>
book3s_pr.c
430 if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) && kvmppc_set_pvr_pr()
550 page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite); kvmppc_handle_pagefault()
572 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); kvmppc_handle_pagefault()
585 if (vcpu->arch.mmu.is_dcbz32(vcpu) && kvmppc_handle_pagefault()
631 else if (vcpu->arch.mmu.is_dcbz32(vcpu) && kvmppc_handle_pagefault()
930 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && kvmppc_handle_exit_pr()
1287 vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv, kvm_arch_vcpu_ioctl_set_sregs_pr()
1292 vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]); kvm_arch_vcpu_ioctl_set_sregs_pr()
/linux-4.4.14/drivers/gpu/drm/msm/mdp/mdp5/
mdp5_kms.c
133 struct msm_mmu *mmu = mdp5_kms->mmu; mdp5_destroy() local
137 if (mmu) { mdp5_destroy()
138 mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); mdp5_destroy()
139 mmu->funcs->destroy(mmu); mdp5_destroy()
477 struct msm_mmu *mmu; mdp5_kms_init() local
598 mmu = msm_iommu_new(&pdev->dev, config->platform.iommu); mdp5_kms_init()
599 if (IS_ERR(mmu)) { mdp5_kms_init()
600 ret = PTR_ERR(mmu); mdp5_kms_init()
606 ret = mmu->funcs->attach(mmu, iommu_ports, mdp5_kms_init()
610 mmu->funcs->destroy(mmu); mdp5_kms_init()
616 mmu = NULL; mdp5_kms_init()
618 mdp5_kms->mmu = mmu; mdp5_kms_init()
620 mdp5_kms->id = msm_register_mmu(dev, mmu); mdp5_kms_init()
mdp5_kms.h
40 struct msm_mmu *mmu; member in struct:mdp5_kms
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/device/
base.c
87 .mmu = nv04_mmu_new,
108 .mmu = nv04_mmu_new,
130 .mmu = nv04_mmu_new,
150 .mmu = nv04_mmu_new,
172 .mmu = nv04_mmu_new,
194 .mmu = nv04_mmu_new,
216 .mmu = nv04_mmu_new,
238 .mmu = nv04_mmu_new,
260 .mmu = nv04_mmu_new,
282 .mmu = nv04_mmu_new,
304 .mmu = nv04_mmu_new,
326 .mmu = nv04_mmu_new,
348 .mmu = nv04_mmu_new,
370 .mmu = nv04_mmu_new,
392 .mmu = nv04_mmu_new,
415 .mmu = nv04_mmu_new,
438 .mmu = nv04_mmu_new,
460 .mmu = nv04_mmu_new,
483 .mmu = nv04_mmu_new,
509 .mmu = nv41_mmu_new,
535 .mmu = nv41_mmu_new,
561 .mmu = nv41_mmu_new,
587 .mmu = nv44_mmu_new,
613 .mmu = nv04_mmu_new,
639 .mmu = nv44_mmu_new,
665 .mmu = nv41_mmu_new,
691 .mmu = nv41_mmu_new,
717 .mmu = nv44_mmu_new,
743 .mmu = nv41_mmu_new,
769 .mmu = nv44_mmu_new,
795 .mmu = nv44_mmu_new,
823 .mmu = nv50_mmu_new,
850 .mmu = nv44_mmu_new,
876 .mmu = nv44_mmu_new,
902 .mmu = nv44_mmu_new,
930 .mmu = nv50_mmu_new,
962 .mmu = nv50_mmu_new,
994 .mmu = nv50_mmu_new,
1026 .mmu = nv50_mmu_new,
1058 .mmu = nv50_mmu_new,
1090 .mmu = nv50_mmu_new,
1122 .mmu = nv50_mmu_new,
1154 .mmu = nv50_mmu_new,
1188 .mmu = nv50_mmu_new,
1221 .mmu = nv50_mmu_new,
1254 .mmu = nv50_mmu_new,
1286 .mmu = nv50_mmu_new,
1318 .mmu = nv50_mmu_new,
1353 .mmu = gf100_mmu_new,
1389 .mmu = gf100_mmu_new,
1424 .mmu = gf100_mmu_new,
1459 .mmu = gf100_mmu_new,
1495 .mmu = gf100_mmu_new,
1531 .mmu = gf100_mmu_new,
1567 .mmu = gf100_mmu_new,
1602 .mmu = gf100_mmu_new,
1635 .mmu = gf100_mmu_new,
1670 .mmu = gf100_mmu_new,
1707 .mmu = gf100_mmu_new,
1744 .mmu = gf100_mmu_new,
1777 .mmu = gf100_mmu_new,
1805 .mmu = gf100_mmu_new,
1841 .mmu = gf100_mmu_new,
1877 .mmu = gf100_mmu_new,
1913 .mmu = gf100_mmu_new,
1949 .mmu = gf100_mmu_new,
1980 .mmu = gf100_mmu_new,
2011 .mmu = gf100_mmu_new,
2038 .mmu = gf100_mmu_new,
2088 _(MMU , device->mmu , &device->mmu->subdev); nvkm_device_subdev()
2533 _(NVKM_SUBDEV_MMU , mmu);
priv.h
18 #include <subdev/mmu.h>
/linux-4.4.14/arch/sh/mm/
Makefile
17 mmu-y := nommu.o extable_32.o
18 mmu-$(CONFIG_MMU) := extable_$(BITS).o fault.o gup.o ioremap.o kmap.o \
21 obj-y += $(mmu-y)
ioremap_fixed.c
24 #include <asm/mmu.h>
ioremap.c
26 #include <asm/mmu.h>
/linux-4.4.14/arch/arm64/kvm/
Makefile
15 kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
/linux-4.4.14/arch/arm/kvm/
Makefile
21 obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
trace.h
196 TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva)
213 TP_printk("mmu notifier unmap range: %#08lx -- %#08lx",
229 TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
246 TP_printk("mmu notifier age hva: %#08lx -- %#08lx",
262 TP_printk("mmu notifier test age hva: %#08lx", __entry->hva)
/linux-4.4.14/drivers/gpu/drm/nouveau/include/nvif/
device.h
41 #include <subdev/mmu.h>
60 #define nvxx_mmu(a) nvxx_device(a)->mmu
/linux-4.4.14/arch/sparc/include/asm/
dcu.h
7 #define DCU_CP _AC(0x0002000000000000,UL) /* Phys Cache Enable w/o mmu */
8 #define DCU_CV _AC(0x0001000000000000,UL) /* Virt Cache Enable w/o mmu */
mmu_context_32.h
12 /* Initialize a new mmu context. This is invoked when a new
turbosparc.h
31 * ME: MMU enable -- 0 = mmu not translating, 1 = mmu translating
contregs.h
5 * space. These are for the mmu's context register, etc.
mbus.h
43 * the mmu control register from ASI_M_MMUREG alternate address space and
swift.h
10 /* Swift is so brain damaged, here is the mmu control register. */
viking.h
81 * ME: MMU enable -- 0 = mmu not translating, 1 = mmu translating
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/dma/
usernv04.c
29 #include <subdev/mmu/nv04.h>
52 struct nv04_mmu *mmu = nv04_mmu(device->mmu); nv04_dmaobj_bind() local
53 struct nvkm_memory *pgt = mmu->vm->pgt[0].mem[0]; nv04_dmaobj_bind()
98 if (device->mmu->func == &nv04_mmu) nv04_dmaobj_new()
/linux-4.4.14/arch/m68k/kernel/
setup_mm.c
407 const char *cpu, *mmu, *fpu; show_cpuinfo() local
456 mmu = "68851"; show_cpuinfo()
458 mmu = "68030"; show_cpuinfo()
460 mmu = "68040"; show_cpuinfo()
462 mmu = "68060"; show_cpuinfo()
464 mmu = "Sun-3"; show_cpuinfo()
466 mmu = "Apollo"; show_cpuinfo()
468 mmu = "ColdFire"; show_cpuinfo()
470 mmu = "unknown"; show_cpuinfo()
480 cpu, mmu, fpu, show_cpuinfo()
setup_no.c
274 char *cpu, *mmu, *fpu; show_cpuinfo() local
278 mmu = "none"; show_cpuinfo()
288 cpu, mmu, fpu, show_cpuinfo()
/linux-4.4.14/arch/microblaze/include/uapi/asm/
unistd.h
59 #define __NR_brk 45 /* ok -mmu, nommu specific */
105 #define __NR_munmap 91 /* ok - mmu and nommu */
164 #define __NR_mlock 150 /* ok - nommu or mmu */
165 #define __NR_munlock 151 /* ok - nommu or mmu */
166 #define __NR_mlockall 152 /* ok - nommu or mmu */
167 #define __NR_munlockall 153 /* ok - nommu or mmu */
177 #define __NR_mremap 163 /* ok - nommu or mmu */
272 #define __NR_remap_file_pages 257 /* only for mmu */
289 #define __NR_mbind 274 /* only for mmu */
290 #define __NR_get_mempolicy 275 /* only for mmu */
291 #define __NR_set_mempolicy 276 /* only for mmu */
309 #define __NR_migrate_pages 294 /* mmu */
332 #define __NR_move_pages 317 /* mmu */
/linux-4.4.14/include/linux/fsl/bestcomm/
sram.h
16 #include <asm/mmu.h>
/linux-4.4.14/arch/powerpc/include/asm/
tlb.h
23 #include <asm/mmu.h>
mmu.h
185 # include <asm/mmu-hash64.h>
188 # include <asm/mmu-hash32.h>
191 # include <asm/mmu-40x.h>
194 # include <asm/mmu-44x.h>
197 # include <asm/mmu-book3e.h>
200 # include <asm/mmu-8xx.h>
pte-40x.h
12 * and ITLB, respectively (see "mmu.h" for definitions).
lppaca.h
36 #include <asm/mmu.h>
mmu_context.h
9 #include <asm/mmu.h>
pte-44x.h
54 * include/asm-ppc/mmu.h). The _PAGE_XXX definitions in this file map to the
/linux-4.4.14/arch/powerpc/sysdev/
fsl_soc.h
5 #include <asm/mmu.h>
/linux-4.4.14/arch/sh/include/asm/
traps_32.h
5 #include <asm/mmu.h>
pgtable.h
21 #include <asm/mmu.h>
/linux-4.4.14/arch/mn10300/mm/
Makefile
28 init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
/linux-4.4.14/arch/avr32/include/asm/
tlbflush.h
11 #include <asm/mmu.h>
/linux-4.4.14/arch/cris/arch-v10/kernel/
setup.c
73 "mmu\t\t: %s\n" show_cpuinfo()
74 "mmu DMA bug\t: %s\n" show_cpuinfo()
/linux-4.4.14/arch/m68k/include/asm/
contregs.h
5 * space. These are for the mmu's context register, etc.
17 #define AC_CONTEXT 0x30000000 /* 34c current mmu-context */
processor.h
142 /* Any fault in kernel is fatal on non-mmu */ handle_kernel_fault()
sun3mmu.h
31 #define AC_CONTEXT 0x30000000 /* 34c current mmu-context */
/linux-4.4.14/drivers/gpu/drm/gma500/
psb_drv.c
182 if (dev_priv->mmu) { psb_driver_unload()
188 (dev_priv->mmu), psb_driver_unload()
192 psb_mmu_driver_takedown(dev_priv->mmu); psb_driver_unload()
193 dev_priv->mmu = NULL; psb_driver_unload()
328 dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0); psb_driver_load()
329 if (!dev_priv->mmu) psb_driver_load()
332 dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0); psb_driver_load()
342 ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu), psb_driver_load()
348 psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0); psb_driver_load()
gtt.c
262 psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu), psb_gtt_pin()
304 psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu), psb_gtt_unpin()
448 * The video mmu has a hw bug when accessing 0x0D0000000. psb_gtt_init()
/linux-4.4.14/arch/microblaze/kernel/cpu/
cpuinfo-static.c
119 ci->mmu = fcpu(cpu, "xlnx,use-mmu"); set_cpuinfo_static()
120 ci->mmu_privins = fcpu(cpu, "xlnx,mmu-privileged-instr"); set_cpuinfo_static()
cpuinfo-pvr-full.c
73 CI(mmu, USE_MMU); set_cpuinfo_pvr_full()
mb.c
74 seq_printf(m, " MMU:\t\t%x\n", cpuinfo.mmu); show_cpuinfo()
/linux-4.4.14/arch/nios2/kernel/
cpuinfo.c
75 cpuinfo.mmu = fcpu_has(cpu, "altr,has-mmu"); setup_cpuinfo()
139 cpuinfo.mmu ? "present" : "none", show_cpuinfo()
/linux-4.4.14/arch/powerpc/boot/
oflib.c
110 * space in the chosen mmu node, and then do a map operation to
140 if (of_getprop(chosen, "mmu", &chosen_mmu, sizeof(chosen_mmu)) <= 0) { check_of_version()
141 printf("no mmu\n"); check_of_version()
/linux-4.4.14/drivers/gpu/drm/msm/adreno/
adreno_gpu.c
336 struct msm_mmu *mmu; adreno_gpu_init() local
375 mmu = gpu->mmu; adreno_gpu_init()
376 if (mmu) { adreno_gpu_init()
377 ret = mmu->funcs->attach(mmu, iommu_ports, adreno_gpu_init()
/linux-4.4.14/drivers/gpu/drm/msm/mdp/mdp4/
mdp4_kms.c
411 struct msm_mmu *mmu; mdp4_kms_init() local
500 mmu = msm_iommu_new(&pdev->dev, config->iommu); mdp4_kms_init()
501 if (IS_ERR(mmu)) { mdp4_kms_init()
502 ret = PTR_ERR(mmu); mdp4_kms_init()
505 ret = mmu->funcs->attach(mmu, iommu_ports, mdp4_kms_init()
512 mmu = NULL; mdp4_kms_init()
515 mdp4_kms->id = msm_register_mmu(dev, mmu); mdp4_kms_init()
/linux-4.4.14/drivers/gpu/drm/nouveau/include/nvkm/subdev/
mmu.h
28 struct nvkm_mmu *mmu; member in struct:nvkm_vm
fb.h
5 #include <subdev/mmu.h>
/linux-4.4.14/arch/um/include/asm/
mmu_context.h
10 #include <asm/mmu.h>
/linux-4.4.14/arch/xtensa/platforms/iss/
setup.c
54 /* Flush and reset the mmu, simulate a processor reset, and platform_restart()
/linux-4.4.14/include/linux/platform_data/
iommu-omap.h
33 * struct omap_mmu_dev_attr - OMAP mmu device attributes for omap_hwmod
/linux-4.4.14/arch/powerpc/platforms/cell/spufs/
lscsa_alloc.c
30 #include <asm/mmu.h>
/linux-4.4.14/arch/sh/kernel/cpu/shmobile/
H A Dsleep.S98 /* save mmu and cache context if needed */
103 /* save mmu state */
295 /* restore mmu and cache state if needed */
300 /* restore mmu state */
/linux-4.4.14/arch/microblaze/include/asm/
H A Dcpuinfo.h40 u32 mmu; member in struct:cpuinfo
H A Dtlbflush.h19 #include <asm/mmu.h>
H A Dmmu_context_mm.h16 #include <asm/mmu.h>
79 * Get a new mmu context for the address space described by `mm'.
H A Dmmu.h15 # include <asm-generic/mmu.h>
/linux-4.4.14/arch/nios2/include/asm/
H A Dcpuinfo.h28 u32 mmu; member in struct:cpuinfo
/linux-4.4.14/arch/frv/include/asm/
H A Dmem-layout.h49 /* see Documentation/frv/mmu-layout.txt */
H A Dmmu.h0 /* mmu.h: memory management context for FR-V with or without MMU support
H A Dhighmem.h7 * See Documentation/frv/mmu-layout.txt for more information.
/linux-4.4.14/arch/arm/mach-omap2/
H A Domap-iommu.c63 return omap_hwmod_for_each_by_class("mmu", omap_iommu_dev_init, NULL); omap_iommu_init()
H A Dpdata-quirks.c85 .reset_name = "mmu",
456 OF_DEV_AUXDATA("ti,omap2-iommu", 0x5d000000, "5d000000.mmu",
484 OF_DEV_AUXDATA("ti,omap4-iommu", 0x4a066000, "4a066000.mmu",
486 OF_DEV_AUXDATA("ti,omap4-iommu", 0x55082000, "55082000.mmu",
/linux-4.4.14/drivers/gpu/drm/nouveau/
H A Dnouveau_chan.c93 struct nvkm_mmu *mmu = nvxx_mmu(device); nouveau_channel_prep() local
141 args.limit = cli->vm->mmu->limit - 1; nouveau_channel_prep()
171 args.limit = mmu->limit - 1; nouveau_channel_prep()
297 struct nvkm_mmu *mmu = nvxx_mmu(device); nouveau_channel_init() local
309 args.limit = cli->vm->mmu->limit - 1; nouveau_channel_init()
326 args.limit = cli->vm->mmu->limit - 1; nouveau_channel_init()
338 args.limit = mmu->limit - 1; nouveau_channel_init()
H A Dnouveau_display.h4 #include <subdev/mmu.h>
H A Dnouveau_ttm.c194 #include <subdev/mmu/nv04.h>
199 struct nvkm_mmu *mmu = nvxx_mmu(&drm->device); nv04_gart_manager_init() local
200 struct nv04_mmu *priv = (void *)mmu; nv04_gart_manager_init()
/linux-4.4.14/arch/sparc/mm/
H A Dultra.S12 #include <asm/mmu.h>
313 mov %o0, %o2 /* ARG2: mmu context */
328 mov %g2, %o1 /* ARG1: mmu context */
345 mov %g3, %o1 /* ARG1: mmu context */
366 mov 0, %o1 /* ARG1: mmu context */
712 mov %g5, %o2 /* ARG2: mmu context */
734 mov %g5, %o1 /* ARG1: mmu context */
762 mov 0, %o1 /* ARG1: mmu context */
H A Dtsunami.S2 * tsunami.S: High speed MicroSparc-I mmu/cache operations.
H A Dswift.S2 * swift.S: MicroSparc-II mmu/cache operations.
H A Dviking.S2 * viking.S: High speed Viking cache/mmu operations
/linux-4.4.14/include/rdma/
H A Dib_umem_odp.h62 * also protects access to the mmu notifier counters.
72 /* A linked list of umems that don't have private mmu notifier
/linux-4.4.14/arch/arm/mach-tegra/
H A Dsleep.S96 * enters suspend in LP2 by turning off the mmu and jumping to
120 * r0 = physical address to jump to with mmu off
/linux-4.4.14/arch/sparc/kernel/
H A Dwuf.S265 LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %l5) ! read mmu-ctrl reg
266 SUN_PI_(lda [%g0] ASI_M_MMUREGS, %l5) ! read mmu-ctrl reg
281 LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %twin_tmp1) ! load mmu-ctrl again
282 SUN_PI_(lda [%g0] ASI_M_MMUREGS, %twin_tmp1) ! load mmu-ctrl again
/linux-4.4.14/arch/c6x/kernel/
H A Dsetup.c78 const char *mmu; member in struct:cpuinfo_c6x
121 p->mmu = "none"; get_cpuinfo()
464 "mmu\t\t: %s\n" show_cpuinfo()
470 p->core_id, p->mmu, p->fpu, show_cpuinfo()
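
Several of the arches above (cris, microblaze, nios2, c6x) expose their MMU type through the show_cpuinfo() seq_file hook behind /proc/cpuinfo. A minimal sketch of that idiom; the static string is a hypothetical stand-in for the per-cpu field (p->mmu, cpuinfo.mmu) that the real implementations print:

    #include <linux/seq_file.h>

    static const char *mmu_desc = "none";   /* hypothetical stand-in for p->mmu */

    static int show_cpuinfo(struct seq_file *m, void *v)
    {
            seq_printf(m, "mmu\t\t: %s\n", mmu_desc);
            return 0;
    }
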
/linux-4.4.14/fs/proc/
H A Dnommu.c1 /* nommu.c: mmu-less memory info files
/linux-4.4.14/arch/metag/include/asm/
H A Dmmu_context.h7 #include <asm/mmu.h>
/linux-4.4.14/arch/microblaze/kernel/
H A Dmisc.S19 #include <asm/mmu.h>
H A Dprom.c40 #include <asm/mmu.h>
/linux-4.4.14/drivers/infiniband/core/
H A Dumem_odp.c81 /* Account for a new mmu notifier in an ib_ucontext. */ ib_ucontext_notifier_start_account()
87 /* Account for a terminating mmu notifier in an ib_ucontext.
97 /* No currently running mmu notifiers. Now is the chance to ib_ucontext_notifier_end_account()
101 /* Prevent concurrent mmu notifiers from working on the ib_ucontext_notifier_end_account()
376 * will be able to eventually obtain the mmu notifiers SRCU. Note ib_umem_odp_release()
506 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
654 * to prevent other mmu notifiers from ib_umem_odp_unmap_dma_pages()
/linux-4.4.14/include/linux/
H A Dmmu_notifier.h15 * The mmu notifier_mm structure is allocated and installed in
21 /* all mmu notifiers registered in this mm are queued in this list */
31 * freed. This can run concurrently with other mmu notifier
33 * should tear down all secondary mmu mappings and freeze the
34 * secondary mmu. If this method isn't implemented you have to
36 * through the secondary mmu by the time the last thread with
43 * through the secondary mmu are terminated by the time the
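
The mmu_notifier.h excerpts give the contract: every notifier registered against an mm is queued on that mm's list, and the ->release() hook must tear down all secondary-mmu mappings and freeze the secondary mmu before the address space dies. This is the machinery the sgi-gru and infiniband ODP hits elsewhere in these results build on. A bare-bones registration sketch; implementing only ->release() is a deliberately minimal assumption, real users also supply the invalidation hooks:

    #include <linux/mmu_notifier.h>
    #include <linux/sched.h>

    static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
    {
            /* last exit: tear down and freeze the secondary mmu */
    }

    static const struct mmu_notifier_ops my_ops = {
            .release = my_release,
    };

    static struct mmu_notifier my_mn = {
            .ops = &my_ops,
    };

    static int my_attach(void)
    {
            /* queues my_mn on current->mm's notifier list */
            return mmu_notifier_register(&my_mn, current->mm);
    }
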
/linux-4.4.14/drivers/dma/bestcomm/
H A Dsram.c25 #include <asm/mmu.h>
/linux-4.4.14/arch/xtensa/platforms/xt2000/
H A Dsetup.c65 /* Flush and reset the mmu, simulate a processor reset, and platform_restart()
/linux-4.4.14/arch/arc/include/asm/
H A Dtlb-mmu1.h12 #include <asm/mmu.h>
H A Darcregs.h348 struct cpuinfo_arc_mmu mmu; member in struct:cpuinfo_arc
H A Dentry.h17 #include <asm/mmu.h>
/linux-4.4.14/arch/powerpc/platforms/cell/
H A Dqpace_setup.c26 #include <asm/mmu.h>
/linux-4.4.14/arch/m32r/include/asm/
H A Dmmu_context.h16 #include <asm/mmu.h>
H A Dpgtable.h11 * M32R mmu expects.
18 * change arch/m32r/mmu.S manually.
/linux-4.4.14/arch/m68k/68360/
H A Dconfig.c2 * config.c - non-mmu 68360 platform initialization code
H A Dentry.S2 * entry.S - non-mmu 68360 interrupt and exceptions entry points
/linux-4.4.14/arch/arm/mm/
H A Didmap.c120 * results when turning off the mmu.
H A DMakefile9 mmap.o pgd.o mmu.o pageattr.o
/linux-4.4.14/drivers/gpu/drm/nouveau/include/nvkm/core/
H A Ddevice.h115 struct nvkm_mmu *mmu; member in struct:nvkm_device
180 int (*mmu )(struct nvkm_device *, int idx, struct nvkm_mmu **); member in struct:nvkm_device_chip
/linux-4.4.14/arch/cris/include/arch-v32/arch/hwregs/
H A DMakefile58 REGDESC += $(BASEDIR)/core/cpu/mmu/doc/mmu_regs.r
121 mmu_defs_asm.h: $(BASEDIR)/core/cpu/mmu/doc/mmu_regs.r
/linux-4.4.14/arch/arm64/include/asm/
H A Dkvm_host.h74 * We don't want allocation failures within the mmu code, so we preallocate
164 /* Cache some mmu pages needed inside spinlock regions */
/linux-4.4.14/arch/arm/include/asm/
H A Dkvm_host.h76 * We don't want allocation failures within the mmu code, so we preallocate
138 /* Cache some mmu pages needed inside spinlock regions */
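
The arm and arm64 kvm_host.h hits both describe the same defensive pattern: pages the mmu code might need while holding a spinlock are preallocated beforehand, where blocking allocation is still legal, so the locked path never sees an allocation failure. A generic sketch of that pattern (the names and fixed-size array are illustrative, not the KVM cache API):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    #define CACHE_MAX 8     /* illustrative capacity */

    /* refill outside the lock: GFP_KERNEL may sleep here */
    static int cache_refill(struct page **cache, int *n)
    {
            while (*n < CACHE_MAX) {
                    struct page *p = alloc_page(GFP_KERNEL);

                    if (!p)
                            return -ENOMEM;
                    cache[(*n)++] = p;
            }
            return 0;
    }

    /* consume under the spinlock: cannot fail after a successful refill */
    static struct page *cache_pop(struct page **cache, int *n)
    {
            return cache[--(*n)];
    }
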
/linux-4.4.14/drivers/misc/sgi-gru/
H A Dgrutlbpurge.c132 * also the structure that contains the pointer to the mmu callout
134 * using the mmu "register" function. The mmu interfaces are used to
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/bar/
H A Dgf100.c28 #include <subdev/mmu.h>
H A Dnv50.c28 #include <subdev/mmu.h>
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/core/
H A Dsubdev.c45 [NVKM_SUBDEV_MMU ] = "mmu",
H A Dgpuobj.c29 #include <subdev/mmu.h>
/linux-4.4.14/arch/unicore32/kernel/
H A Dsleep.S176 movc p0.c1, r5, #0 @ control reg, turn on mmu
/linux-4.4.14/kernel/
H A Dsys_ni.c190 /* mmu depending weak syscall entries */
/linux-4.4.14/arch/powerpc/kernel/
H A Dcpu_setup_fsl_booke.S19 #include <asm/mmu-book3e.h>
H A Dswsusp_booke.S14 #include <asm/mmu.h>
H A Dmisc_64.S29 #include <asm/mmu.h>
569 * switch to real mode (turn mmu off)
646 /* turn off mmu */
H A Dmachine_kexec_64.c27 #include <asm/mmu.h>
70 * For non-LPAR, we absolutely can not overwrite the mmu hash default_machine_kexec_prepare()
/linux-4.4.14/arch/powerpc/mm/
H A D40x_mmu.c45 #include <asm/mmu.h>
H A Dinit_32.c41 #include <asm/mmu.h>
H A Dmmu_decl.h23 #include <asm/mmu.h>
/linux-4.4.14/arch/powerpc/platforms/44x/
H A Diss4xx.c33 #include <asm/mmu.h>
/linux-4.4.14/arch/mips/include/asm/
H A Dmmu_context.h193 /* mark mmu ownership change */ activate_mm()
/linux-4.4.14/arch/m68k/include/uapi/asm/
H A Dbootinfo.h59 #define BI_MMUTYPE 0x0004 /* mmu type (__be32) */
/linux-4.4.14/arch/mips/mm/
H A Dtlb-r3k.c2 * r2300.c: R2000 and R3000 specific mmu/cache code.
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/gr/
H A Dgf100.h31 #include <subdev/mmu.h>
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/
H A Dnv50.c30 #include <subdev/mmu.h>
/linux-4.4.14/drivers/misc/cxl/
H A Dfault.c20 #include <asm/mmu.h>
/linux-4.4.14/arch/xtensa/platforms/xtfpga/
H A Dsetup.c55 /* Flush and reset the mmu, simulate a processor reset, and platform_restart()
/linux-4.4.14/arch/alpha/include/asm/
H A Dmmu_context.h5 * get a new mmu context..

Completed in 5048 milliseconds
