Searched refs:vm (Results 1 - 200 of 251) sorted by relevance

/linux-4.1.27/drivers/video/
videomode.c
15 struct videomode *vm) videomode_from_timing()
17 vm->pixelclock = dt->pixelclock.typ; videomode_from_timing()
18 vm->hactive = dt->hactive.typ; videomode_from_timing()
19 vm->hfront_porch = dt->hfront_porch.typ; videomode_from_timing()
20 vm->hback_porch = dt->hback_porch.typ; videomode_from_timing()
21 vm->hsync_len = dt->hsync_len.typ; videomode_from_timing()
23 vm->vactive = dt->vactive.typ; videomode_from_timing()
24 vm->vfront_porch = dt->vfront_porch.typ; videomode_from_timing()
25 vm->vback_porch = dt->vback_porch.typ; videomode_from_timing()
26 vm->vsync_len = dt->vsync_len.typ; videomode_from_timing()
28 vm->flags = dt->flags; videomode_from_timing()
33 struct videomode *vm, unsigned int index) videomode_from_timings()
41 videomode_from_timing(dt, vm); videomode_from_timings()
14 videomode_from_timing(const struct display_timing *dt, struct videomode *vm) videomode_from_timing() argument
32 videomode_from_timings(const struct display_timings *disp, struct videomode *vm, unsigned int index) videomode_from_timings() argument
of_videomode.c
19 * @vm - set to return value
26 * specified by index into *vm. This function should only be used, if
31 int of_get_videomode(struct device_node *np, struct videomode *vm, of_get_videomode() argument
46 ret = videomode_from_timings(disp, vm, index); of_get_videomode()
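
The two files above are the consumer-facing entry points: of_get_videomode() looks up a node's display-timings, picks the entry at @index, and fills a struct videomode via videomode_from_timing(). A minimal usage sketch, assuming this tree's <video/of_videomode.h>; example_read_panel_mode() is a hypothetical helper, not from the tree:

	#include <linux/of.h>
	#include <linux/printk.h>
	#include <video/of_videomode.h>
	#include <video/videomode.h>

	static int example_read_panel_mode(struct device_node *np)
	{
		struct videomode vm;
		int ret = of_get_videomode(np, &vm, 0); /* 0 = first timing entry */

		if (ret < 0)
			return ret; /* no display-timings node or parse failure */

		pr_info("panel: %ux%u, pixel clock %lu Hz\n",
			vm.hactive, vm.vactive, vm.pixelclock);
		return 0;
	}
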
/linux-4.1.27/sound/pci/ctxfi/
ctvmem.c
29 * Find or create vm block based on requested @size.
33 get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc) get_vm_block() argument
39 if (size > vm->size) { get_vm_block()
45 mutex_lock(&vm->lock); get_vm_block()
46 list_for_each(pos, &vm->unused) { get_vm_block()
51 if (pos == &vm->unused) get_vm_block()
55 /* Move the vm node from unused list to used list directly */ get_vm_block()
56 list_move(&entry->list, &vm->used); get_vm_block()
57 vm->size -= size; get_vm_block()
68 list_add(&block->list, &vm->used); get_vm_block()
71 vm->size -= size; get_vm_block()
74 mutex_unlock(&vm->lock); get_vm_block()
78 static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block) put_vm_block() argument
85 mutex_lock(&vm->lock); put_vm_block()
87 vm->size += block->size; put_vm_block()
89 list_for_each(pos, &vm->unused) { put_vm_block()
94 if (pos == &vm->unused) { put_vm_block()
95 list_add_tail(&block->list, &vm->unused); put_vm_block()
110 while (pre != &vm->unused) { put_vm_block()
122 mutex_unlock(&vm->lock); put_vm_block()
127 ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size) ct_vm_map() argument
135 block = get_vm_block(vm, size, atc); ct_vm_map()
142 ptp = (unsigned long *)vm->ptp[0].area; ct_vm_map()
155 static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block) ct_vm_unmap() argument
158 put_vm_block(vm, block); ct_vm_unmap()
167 ct_get_ptp_phys(struct ct_vm *vm, int index) ct_get_ptp_phys() argument
171 addr = (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr; ct_get_ptp_phys()
178 struct ct_vm *vm; ct_vm_create() local
184 vm = kzalloc(sizeof(*vm), GFP_KERNEL); ct_vm_create()
185 if (!vm) ct_vm_create()
188 mutex_init(&vm->lock); ct_vm_create()
194 PAGE_SIZE, &vm->ptp[i]); ct_vm_create()
200 ct_vm_destroy(vm); ct_vm_create()
203 vm->size = CT_ADDRS_PER_PAGE * i; ct_vm_create()
204 vm->map = ct_vm_map; ct_vm_create()
205 vm->unmap = ct_vm_unmap; ct_vm_create()
206 vm->get_ptp_phys = ct_get_ptp_phys; ct_vm_create()
207 INIT_LIST_HEAD(&vm->unused); ct_vm_create()
208 INIT_LIST_HEAD(&vm->used); ct_vm_create()
212 block->size = vm->size; ct_vm_create()
213 list_add(&block->list, &vm->unused); ct_vm_create()
216 *rvm = vm; ct_vm_create()
222 void ct_vm_destroy(struct ct_vm *vm) ct_vm_destroy() argument
229 while (!list_empty(&vm->used)) { ct_vm_destroy()
230 pos = vm->used.next; ct_vm_destroy()
235 while (!list_empty(&vm->unused)) { ct_vm_destroy()
236 pos = vm->unused.next; ct_vm_destroy()
244 snd_dma_free_pages(&vm->ptp[i]); ct_vm_destroy()
246 vm->size = 0; ct_vm_destroy()
248 kfree(vm); ct_vm_destroy()
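
The ctvmem.c hits implement a textbook first-fit allocator over two lists (unused and used): get_vm_block() scans vm->unused for the first block large enough, moving it wholesale on an exact fit or splitting it otherwise, and put_vm_block() returns blocks sorted for merging. A standalone userspace sketch of just the first-fit split, with hypothetical names, no locking, and no coalescing on free:

	#include <stddef.h>
	#include <stdlib.h>

	struct blk {
		struct blk *next;
		size_t addr, size;
	};

	/* First-fit, as in get_vm_block(): take the first free block that
	 * is big enough; carve the request off its front, or unlink it
	 * entirely on an exact fit. Returns NULL when nothing fits. */
	static struct blk *blk_get(struct blk **unused, size_t size)
	{
		struct blk **pp, *b;

		for (pp = unused; (b = *pp) != NULL; pp = &b->next) {
			if (b->size < size)
				continue;
			if (b->size == size) {	/* exact fit: move whole block */
				*pp = b->next;
				b->next = NULL;
				return b;
			}
			struct blk *n = malloc(sizeof(*n));	/* split */
			if (!n)
				return NULL;
			n->addr = b->addr;
			n->size = size;
			n->next = NULL;
			b->addr += size; /* remainder stays on the free list */
			b->size -= size;
			return n;
		}
		return NULL;
	}
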
ctvmem.h
57 dma_addr_t (*get_ptp_phys)(struct ct_vm *vm, int index);
61 void ct_vm_destroy(struct ct_vm *vm);
ctatc.c
139 struct ct_vm *vm; ct_map_audio_buffer() local
145 vm = atc->vm; ct_map_audio_buffer()
147 apcm->vm_block = vm->map(vm, apcm->substream, runtime->dma_bytes); ct_map_audio_buffer()
157 struct ct_vm *vm; ct_unmap_audio_buffer() local
162 vm = atc->vm; ct_unmap_audio_buffer()
164 vm->unmap(vm, apcm->vm_block); ct_unmap_audio_buffer()
171 return atc->vm->get_ptp_phys(atc->vm, index); atc_get_ptp_phys()
1243 if (atc->vm) { ct_atc_destroy()
1244 ct_vm_destroy(atc->vm); ct_atc_destroy()
1245 atc->vm = NULL; ct_atc_destroy()
1713 err = ct_vm_create(&atc->vm, pci); ct_atc_create()
ctatc.h
89 struct ct_vm *vm; /* device virtual memory manager for this card */ member in struct:ct_atc
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
base.c
32 struct nvkm_vm *vm = vma->vm; nvkm_vm_map_at() local
33 struct nvkm_mmu *mmu = vm->mmu; nvkm_vm_map_at()
38 u32 pde = (offset >> mmu->pgt_bits) - vm->fpde; nvkm_vm_map_at()
49 struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big]; nvkm_vm_map_at()
70 mmu->flush(vm); nvkm_vm_map_at()
77 struct nvkm_vm *vm = vma->vm; nvkm_vm_map_sg_table() local
78 struct nvkm_mmu *mmu = vm->mmu; nvkm_vm_map_sg_table()
83 u32 pde = (offset >> mmu->pgt_bits) - vm->fpde; nvkm_vm_map_sg_table()
92 struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big]; nvkm_vm_map_sg_table()
128 mmu->flush(vm); nvkm_vm_map_sg_table()
135 struct nvkm_vm *vm = vma->vm; nvkm_vm_map_sg() local
136 struct nvkm_mmu *mmu = vm->mmu; nvkm_vm_map_sg()
142 u32 pde = (offset >> mmu->pgt_bits) - vm->fpde; nvkm_vm_map_sg()
148 struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big]; nvkm_vm_map_sg()
166 mmu->flush(vm); nvkm_vm_map_sg()
184 struct nvkm_vm *vm = vma->vm; nvkm_vm_unmap_at() local
185 struct nvkm_mmu *mmu = vm->mmu; nvkm_vm_unmap_at()
190 u32 pde = (offset >> mmu->pgt_bits) - vm->fpde; nvkm_vm_unmap_at()
196 struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big]; nvkm_vm_unmap_at()
213 mmu->flush(vm); nvkm_vm_unmap_at()
223 nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde) nvkm_vm_unmap_pgt() argument
225 struct nvkm_mmu *mmu = vm->mmu; nvkm_vm_unmap_pgt()
232 vpgt = &vm->pgt[pde - vm->fpde]; nvkm_vm_unmap_pgt()
239 list_for_each_entry(vpgd, &vm->pgd_list, head) { nvkm_vm_unmap_pgt()
250 nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type) nvkm_vm_map_pgt() argument
252 struct nvkm_mmu *mmu = vm->mmu; nvkm_vm_map_pgt()
253 struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; nvkm_vm_map_pgt()
264 ret = nvkm_gpuobj_new(nv_object(vm->mmu), NULL, pgt_size, 0x1000, nvkm_vm_map_pgt()
279 list_for_each_entry(vpgd, &vm->pgd_list, head) { nvkm_vm_map_pgt()
287 nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access, nvkm_vm_get() argument
290 struct nvkm_mmu *mmu = vm->mmu; nvkm_vm_get()
297 ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align, nvkm_vm_get()
308 struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; nvkm_vm_get()
316 ret = nvkm_vm_map_pgt(vm, pde, vma->node->type); nvkm_vm_get()
319 nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1); nvkm_vm_get()
320 nvkm_mm_free(&vm->mm, &vma->node); nvkm_vm_get()
327 vma->vm = NULL; nvkm_vm_get()
328 nvkm_vm_ref(vm, &vma->vm, NULL); nvkm_vm_get()
337 struct nvkm_vm *vm = vma->vm; nvkm_vm_put() local
338 struct nvkm_mmu *mmu = vm->mmu; nvkm_vm_put()
347 nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->spg_shift, fpde, lpde); nvkm_vm_put()
348 nvkm_mm_free(&vm->mm, &vma->node); nvkm_vm_put()
351 nvkm_vm_ref(NULL, &vma->vm, NULL); nvkm_vm_put()
358 struct nvkm_vm *vm; nvkm_vm_create() local
362 vm = kzalloc(sizeof(*vm), GFP_KERNEL); nvkm_vm_create()
363 if (!vm) nvkm_vm_create()
366 INIT_LIST_HEAD(&vm->pgd_list); nvkm_vm_create()
367 vm->mmu = mmu; nvkm_vm_create()
368 kref_init(&vm->refcount); nvkm_vm_create()
369 vm->fpde = offset >> (mmu->pgt_bits + 12); nvkm_vm_create()
370 vm->lpde = (offset + length - 1) >> (mmu->pgt_bits + 12); nvkm_vm_create()
372 vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt)); nvkm_vm_create()
373 if (!vm->pgt) { nvkm_vm_create()
374 kfree(vm); nvkm_vm_create()
378 ret = nvkm_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12, nvkm_vm_create()
381 vfree(vm->pgt); nvkm_vm_create()
382 kfree(vm); nvkm_vm_create()
386 *pvm = vm; nvkm_vm_create()
400 nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd) nvkm_vm_link() argument
402 struct nvkm_mmu *mmu = vm->mmu; nvkm_vm_link()
416 for (i = vm->fpde; i <= vm->lpde; i++) nvkm_vm_link()
417 mmu->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj); nvkm_vm_link()
418 list_add(&vpgd->head, &vm->pgd_list); nvkm_vm_link()
424 nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd) nvkm_vm_unlink() argument
426 struct nvkm_mmu *mmu = vm->mmu; nvkm_vm_unlink()
434 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { nvkm_vm_unlink()
450 struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount); nvkm_vm_del() local
453 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { nvkm_vm_del()
454 nvkm_vm_unlink(vm, vpgd->obj); nvkm_vm_del()
457 nvkm_mm_fini(&vm->mm); nvkm_vm_del()
458 vfree(vm->pgt); nvkm_vm_del()
459 kfree(vm); nvkm_vm_del()
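
Taken together, the base.c hits sketch the nvkm VA lifecycle: nvkm_vm_create() builds the page-table array, nvkm_vm_get() reserves a range (populating page tables through nvkm_vm_map_pgt() as needed) and takes a reference via nvkm_vm_ref(), and nvkm_vm_put() releases both. A hedged caller sketch; the shift value and the NV_MEM_ACCESS_RW access flag are assumptions, not taken from these hits:

	/* Sketch only: reserve and release a GPU virtual range. */
	static int example_reserve_va(struct nvkm_vm *vm, u64 size,
				      struct nvkm_vma *vma)
	{
		/* 12 = small-page shift; access assumed NV_MEM_ACCESS_RW */
		int ret = nvkm_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, vma);

		if (ret)
			return ret;
		/* ... map memory into the range and use it ... */
		nvkm_vm_put(vma); /* frees the range, drops vma->vm reference */
		return 0;
	}
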
nv04.h
8 struct nvkm_vm *vm; member in struct:nv04_mmu_priv
nv04.c
64 nv04_vm_flush(struct nvkm_vm *vm) nv04_vm_flush() argument
109 &priv->vm); nv04_mmu_ctor()
116 &priv->vm->pgt[0].obj[0]); nv04_mmu_ctor()
117 dma = priv->vm->pgt[0].obj[0]; nv04_mmu_ctor()
118 priv->vm->pgt[0].refcount[0] = 1; nv04_mmu_ctor()
131 if (priv->vm) { nv04_mmu_dtor()
132 nvkm_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]); nv04_mmu_dtor()
133 nvkm_vm_ref(NULL, &priv->vm, NULL); nv04_mmu_dtor()
nv41.c
66 nv41_vm_flush(struct nvkm_vm *vm) nv41_vm_flush() argument
68 struct nv04_mmu_priv *priv = (void *)vm->mmu; nv41_vm_flush()
116 &priv->vm); nv41_mmu_ctor()
123 &priv->vm->pgt[0].obj[0]); nv41_mmu_ctor()
124 priv->vm->pgt[0].refcount[0] = 1; nv41_mmu_ctor()
135 struct nvkm_gpuobj *dma = priv->vm->pgt[0].obj[0]; nv41_mmu_init()
gf100.c
115 struct nvkm_ltc *ltc = nvkm_ltc(vma->vm->mmu); gf100_vm_map()
159 gf100_vm_flush(struct nvkm_vm *vm) gf100_vm_flush() argument
161 struct gf100_mmu_priv *priv = (void *)vm->mmu; gf100_vm_flush()
169 if (atomic_read(&vm->engref[NVDEV_SUBDEV_BAR])) gf100_vm_flush()
173 list_for_each_entry(vpgd, &vm->pgd_list, head) { gf100_vm_flush()
178 nv_error(priv, "vm timeout 0: 0x%08x %d\n", gf100_vm_flush()
187 nv_error(priv, "vm timeout 1: 0x%08x %d\n", gf100_vm_flush()
209 ret = nvkm_mmu_create(parent, engine, oclass, "VM", "vm", &priv); gf100_mmu_ctor()
nv44.c
88 struct nv04_mmu_priv *priv = (void *)vma->vm->mmu; nv44_vm_map_sg()
141 nv44_vm_flush(struct nvkm_vm *vm) nv44_vm_flush() argument
143 struct nv04_mmu_priv *priv = (void *)vm->mmu; nv44_vm_flush()
193 &priv->vm); nv44_mmu_ctor()
200 &priv->vm->pgt[0].obj[0]); nv44_mmu_ctor()
201 priv->vm->pgt[0].refcount[0] = 1; nv44_mmu_ctor()
212 struct nvkm_gpuobj *gart = priv->vm->pgt[0].obj[0]; nv44_mmu_init()
nv50.c
87 if (nvkm_fb(vma->vm->mmu)->ram->stolen) { nv50_vm_map()
88 phys += nvkm_fb(vma->vm->mmu)->ram->stolen; nv50_vm_map()
150 nv50_vm_flush(struct nvkm_vm *vm) nv50_vm_flush() argument
152 struct nv50_mmu_priv *priv = (void *)vm->mmu; nv50_vm_flush()
161 if (!atomic_read(&vm->engref[i])) nv50_vm_flush()
189 nv_error(priv, "vm flush timeout: engine %d\n", vme); nv50_vm_flush()
213 ret = nvkm_mmu_create(parent, engine, oclass, "VM", "vm", &priv); nv50_mmu_ctor()
/linux-4.1.27/drivers/gpu/drm/radeon/
radeon_vm.c
78 * radeon_vm_manager_init - init the vm manager
82 * Init the vm manager (cayman+).
100 * radeon_vm_manager_fini - tear down the vm manager
120 * radeon_vm_get_bos - add the vm BOs to a validation list
122 * @vm: vm providing the BOs
129 struct radeon_vm *vm, radeon_vm_get_bos()
135 list = drm_malloc_ab(vm->max_pde_used + 2, radeon_vm_get_bos()
140 /* add the vm page table to the list */ radeon_vm_get_bos()
141 list[0].robj = vm->page_directory; radeon_vm_get_bos()
144 list[0].tv.bo = &vm->page_directory->tbo; radeon_vm_get_bos()
149 for (i = 0, idx = 1; i <= vm->max_pde_used; i++) { radeon_vm_get_bos()
150 if (!vm->page_tables[i].bo) radeon_vm_get_bos()
153 list[idx].robj = vm->page_tables[i].bo; radeon_vm_get_bos()
169 * @vm: vm to allocate id for
172 * Allocate an id for the vm (cayman+).
178 struct radeon_vm *vm, int ring) radeon_vm_grab_id()
181 struct radeon_vm_id *vm_id = &vm->ids[ring]; radeon_vm_grab_id()
225 * radeon_vm_flush - hardware flush the vm
228 * @vm: vm we want to flush
230 * @updates: last vm update that is waited for
232 * Flush the vm (cayman+).
237 struct radeon_vm *vm, radeon_vm_flush()
240 uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory); radeon_vm_flush()
241 struct radeon_vm_id *vm_id = &vm->ids[ring]; radeon_vm_flush()
246 trace_radeon_vm_flush(pd_addr, ring, vm->ids[ring].id); radeon_vm_flush()
257 * radeon_vm_fence - remember fence for vm
260 * @vm: vm we want to fence
263 * Fence the vm (cayman+).
269 struct radeon_vm *vm, radeon_vm_fence()
272 unsigned vm_id = vm->ids[fence->ring].id; radeon_vm_fence()
277 radeon_fence_unref(&vm->ids[fence->ring].last_id_use); radeon_vm_fence()
278 vm->ids[fence->ring].last_id_use = radeon_fence_ref(fence); radeon_vm_fence()
282 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
284 * @vm: requested vm
287 * Find @bo inside the requested vm (cayman+).
288 * Search inside the @bos vm list for the requested vm
293 struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm, radeon_vm_bo_find() argument
299 if (bo_va->vm == vm) { radeon_vm_bo_find()
307 * radeon_vm_bo_add - add a bo to a specific vm
310 * @vm: requested vm
313 * Add @bo into the requested vm (cayman+).
314 * Add @bo to the list of bos associated with the vm
320 struct radeon_vm *vm, radeon_vm_bo_add()
329 bo_va->vm = vm; radeon_vm_bo_add()
339 mutex_lock(&vm->mutex); radeon_vm_bo_add()
341 mutex_unlock(&vm->mutex); radeon_vm_bo_add()
433 * radeon_vm_bo_set_addr - set bos virtual address inside a vm
441 * Validate and set the offset requested within the vm address space.
452 struct radeon_vm *vm = bo_va->vm; radeon_vm_bo_set_addr() local
477 mutex_lock(&vm->mutex); radeon_vm_bo_set_addr()
482 it = interval_tree_iter_first(&vm->va, soffset, eoffset); radeon_vm_bo_set_addr()
490 mutex_unlock(&vm->mutex); radeon_vm_bo_set_addr()
502 mutex_unlock(&vm->mutex); radeon_vm_bo_set_addr()
508 tmp->vm = vm; radeon_vm_bo_set_addr()
511 spin_lock(&vm->status_lock); radeon_vm_bo_set_addr()
512 list_add(&tmp->vm_status, &vm->freed); radeon_vm_bo_set_addr()
513 spin_unlock(&vm->status_lock); radeon_vm_bo_set_addr()
518 interval_tree_remove(&bo_va->it, &vm->va); radeon_vm_bo_set_addr()
526 interval_tree_insert(&bo_va->it, &vm->va); radeon_vm_bo_set_addr()
537 if (eoffset > vm->max_pde_used) radeon_vm_bo_set_addr()
538 vm->max_pde_used = eoffset; radeon_vm_bo_set_addr()
546 if (vm->page_tables[pt_idx].bo) radeon_vm_bo_set_addr()
550 mutex_unlock(&vm->mutex); radeon_vm_bo_set_addr()
566 mutex_lock(&vm->mutex); radeon_vm_bo_set_addr()
567 if (vm->page_tables[pt_idx].bo) { radeon_vm_bo_set_addr()
569 mutex_unlock(&vm->mutex); radeon_vm_bo_set_addr()
571 mutex_lock(&vm->mutex); radeon_vm_bo_set_addr()
575 vm->page_tables[pt_idx].addr = 0; radeon_vm_bo_set_addr()
576 vm->page_tables[pt_idx].bo = pt; radeon_vm_bo_set_addr()
579 mutex_unlock(&vm->mutex); radeon_vm_bo_set_addr()
632 * @vm: requested vm
643 struct radeon_vm *vm) radeon_vm_update_page_directory()
645 struct radeon_bo *pd = vm->page_directory; radeon_vm_update_page_directory()
657 ndw += vm->max_pde_used * 6; radeon_vm_update_page_directory()
669 for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { radeon_vm_update_page_directory()
670 struct radeon_bo *bo = vm->page_tables[pt_idx].bo; radeon_vm_update_page_directory()
677 if (vm->page_tables[pt_idx].addr == pt) radeon_vm_update_page_directory()
679 vm->page_tables[pt_idx].addr = pt; radeon_vm_update_page_directory()
805 * @vm: requested vm
816 struct radeon_vm *vm, radeon_vm_update_ptes()
829 struct radeon_bo *pt = vm->page_tables[pt_idx].bo; radeon_vm_update_ptes()
878 * @vm: requested vm
887 static void radeon_vm_fence_pts(struct radeon_vm *vm, radeon_vm_fence_pts() argument
897 radeon_bo_fence(vm->page_tables[i].bo, fence, true); radeon_vm_fence_pts()
901 * radeon_vm_bo_update - map a bo into the vm page table
904 * @vm: requested vm
917 struct radeon_vm *vm = bo_va->vm; radeon_vm_bo_update() local
925 dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n", radeon_vm_bo_update()
926 bo_va->bo, vm); radeon_vm_bo_update()
930 spin_lock(&vm->status_lock); radeon_vm_bo_update()
932 spin_unlock(&vm->status_lock); radeon_vm_bo_update()
1005 radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use); radeon_vm_bo_update()
1008 r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start, radeon_vm_bo_update()
1025 radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence); radeon_vm_bo_update()
1037 * @vm: requested vm
1045 struct radeon_vm *vm) radeon_vm_clear_freed()
1050 spin_lock(&vm->status_lock); radeon_vm_clear_freed()
1051 while (!list_empty(&vm->freed)) { radeon_vm_clear_freed()
1052 bo_va = list_first_entry(&vm->freed, radeon_vm_clear_freed()
1054 spin_unlock(&vm->status_lock); radeon_vm_clear_freed()
1063 spin_lock(&vm->status_lock); radeon_vm_clear_freed()
1065 spin_unlock(&vm->status_lock); radeon_vm_clear_freed()
1074 * @vm: requested vm
1082 struct radeon_vm *vm) radeon_vm_clear_invalids()
1087 spin_lock(&vm->status_lock); radeon_vm_clear_invalids()
1088 while (!list_empty(&vm->invalidated)) { radeon_vm_clear_invalids()
1089 bo_va = list_first_entry(&vm->invalidated, radeon_vm_clear_invalids()
1091 spin_unlock(&vm->status_lock); radeon_vm_clear_invalids()
1097 spin_lock(&vm->status_lock); radeon_vm_clear_invalids()
1099 spin_unlock(&vm->status_lock); radeon_vm_clear_invalids()
1105 * radeon_vm_bo_rmv - remove a bo to a specific vm
1110 * Remove @bo_va->bo from the requested vm (cayman+).
1117 struct radeon_vm *vm = bo_va->vm; radeon_vm_bo_rmv() local
1121 mutex_lock(&vm->mutex); radeon_vm_bo_rmv()
1123 interval_tree_remove(&bo_va->it, &vm->va); radeon_vm_bo_rmv()
1124 spin_lock(&vm->status_lock); radeon_vm_bo_rmv()
1129 list_add(&bo_va->vm_status, &vm->freed); radeon_vm_bo_rmv()
1134 spin_unlock(&vm->status_lock); radeon_vm_bo_rmv()
1136 mutex_unlock(&vm->mutex); radeon_vm_bo_rmv()
1143 * @vm: requested vm
1155 spin_lock(&bo_va->vm->status_lock); radeon_vm_bo_invalidate()
1157 list_add(&bo_va->vm_status, &bo_va->vm->invalidated); radeon_vm_bo_invalidate()
1158 spin_unlock(&bo_va->vm->status_lock); radeon_vm_bo_invalidate()
1164 * radeon_vm_init - initialize a vm instance
1167 * @vm: requested vm
1169 * Init @vm fields (cayman+).
1171 int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) radeon_vm_init() argument
1178 vm->ib_bo_va = NULL; radeon_vm_init()
1180 vm->ids[i].id = 0; radeon_vm_init()
1181 vm->ids[i].flushed_updates = NULL; radeon_vm_init()
1182 vm->ids[i].last_id_use = NULL; radeon_vm_init()
1184 mutex_init(&vm->mutex); radeon_vm_init()
1185 vm->va = RB_ROOT; radeon_vm_init()
1186 spin_lock_init(&vm->status_lock); radeon_vm_init()
1187 INIT_LIST_HEAD(&vm->invalidated); radeon_vm_init()
1188 INIT_LIST_HEAD(&vm->freed); radeon_vm_init()
1195 vm->page_tables = kzalloc(pts_size, GFP_KERNEL); radeon_vm_init()
1196 if (vm->page_tables == NULL) { radeon_vm_init()
1203 NULL, &vm->page_directory); radeon_vm_init()
1207 r = radeon_vm_clear_bo(rdev, vm->page_directory); radeon_vm_init()
1209 radeon_bo_unref(&vm->page_directory); radeon_vm_init()
1210 vm->page_directory = NULL; radeon_vm_init()
1218 * radeon_vm_fini - tear down a vm instance
1221 * @vm: requested vm
1223 * Tear down @vm (cayman+).
1224 * Unbind the VM and remove all bos from the vm bo list
1226 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) radeon_vm_fini() argument
1231 if (!RB_EMPTY_ROOT(&vm->va)) { radeon_vm_fini()
1232 dev_err(rdev->dev, "still active bo inside vm\n"); radeon_vm_fini()
1234 rbtree_postorder_for_each_entry_safe(bo_va, tmp, &vm->va, it.rb) { radeon_vm_fini()
1235 interval_tree_remove(&bo_va->it, &vm->va); radeon_vm_fini()
1244 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) { radeon_vm_fini()
1251 radeon_bo_unref(&vm->page_tables[i].bo); radeon_vm_fini()
1252 kfree(vm->page_tables); radeon_vm_fini()
1254 radeon_bo_unref(&vm->page_directory); radeon_vm_fini()
1257 radeon_fence_unref(&vm->ids[i].flushed_updates); radeon_vm_fini()
1258 radeon_fence_unref(&vm->ids[i].last_id_use); radeon_vm_fini()
1261 mutex_destroy(&vm->mutex); radeon_vm_fini()
128 radeon_vm_get_bos(struct radeon_device *rdev, struct radeon_vm *vm, struct list_head *head) radeon_vm_get_bos() argument
177 radeon_vm_grab_id(struct radeon_device *rdev, struct radeon_vm *vm, int ring) radeon_vm_grab_id() argument
236 radeon_vm_flush(struct radeon_device *rdev, struct radeon_vm *vm, int ring, struct radeon_fence *updates) radeon_vm_flush() argument
268 radeon_vm_fence(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_fence *fence) radeon_vm_fence() argument
319 radeon_vm_bo_add(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_bo *bo) radeon_vm_bo_add() argument
642 radeon_vm_update_page_directory(struct radeon_device *rdev, struct radeon_vm *vm) radeon_vm_update_page_directory() argument
815 radeon_vm_update_ptes(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_ib *ib, uint64_t start, uint64_t end, uint64_t dst, uint32_t flags) radeon_vm_update_ptes() argument
1044 radeon_vm_clear_freed(struct radeon_device *rdev, struct radeon_vm *vm) radeon_vm_clear_freed() argument
1081 radeon_vm_clear_invalids(struct radeon_device *rdev, struct radeon_vm *vm) radeon_vm_clear_invalids() argument
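
The radeon_vm.c hits describe a per-open-file VM (cayman+): radeon_vm_init() sets up the page directory and lists, radeon_vm_bo_add() attaches a BO to the vm, radeon_vm_bo_set_addr() validates and assigns a virtual range, and radeon_vm_fini() tears everything down. A hedged lifecycle sketch; the offset and page flags are illustrative, and example_vm_setup() is hypothetical:

	/* Sketch only: per-VM setup and teardown implied by the hits above. */
	static int example_vm_setup(struct radeon_device *rdev,
				    struct radeon_vm *vm, struct radeon_bo *bo)
	{
		struct radeon_bo_va *bo_va;
		int r = radeon_vm_init(rdev, vm);

		if (r)
			return r;

		bo_va = radeon_vm_bo_add(rdev, vm, bo); /* track bo in this vm */
		if (!bo_va) {
			radeon_vm_fini(rdev, vm);
			return -ENOMEM;
		}

		/* place it in the VA space; radeon_vm_bo_set_addr() checks the
		 * range against vm->va and allocates page tables as needed */
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0x100000,
					  RADEON_VM_PAGE_READABLE |
					  RADEON_VM_PAGE_WRITEABLE);
		if (r)
			radeon_vm_fini(rdev, vm);
		return r;
	}
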
radeon_ib.c
56 struct radeon_ib *ib, struct radeon_vm *vm, radeon_ib_get()
72 ib->vm = vm; radeon_ib_get()
73 if (vm) { radeon_ib_get()
141 /* grab a vm id if necessary */ radeon_ib_schedule()
142 if (ib->vm) { radeon_ib_schedule()
144 vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring); radeon_ib_schedule()
156 if (ib->vm) radeon_ib_schedule()
157 radeon_vm_flush(rdev, ib->vm, ib->ring, radeon_ib_schedule()
175 if (ib->vm) radeon_ib_schedule()
176 radeon_vm_fence(rdev, ib->vm, ib->fence); radeon_ib_schedule()
55 radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib *ib, struct radeon_vm *vm, unsigned size) radeon_ib_get() argument
radeon_cs.c
177 p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm, radeon_cs_parser_relocs()
479 struct radeon_vm *vm) radeon_bo_vm_update_pte()
485 r = radeon_vm_update_page_directory(rdev, vm); radeon_bo_vm_update_pte()
489 r = radeon_vm_clear_freed(rdev, vm); radeon_bo_vm_update_pte()
493 if (vm->ib_bo_va == NULL) { radeon_bo_vm_update_pte()
498 r = radeon_vm_bo_update(rdev, vm->ib_bo_va, radeon_bo_vm_update_pte()
507 bo_va = radeon_vm_bo_find(vm, bo); radeon_bo_vm_update_pte()
509 dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm); radeon_bo_vm_update_pte()
520 return radeon_vm_clear_invalids(rdev, vm); radeon_bo_vm_update_pte()
527 struct radeon_vm *vm = &fpriv->vm; radeon_cs_ib_vm_chunk() local
550 mutex_lock(&vm->mutex); radeon_cs_ib_vm_chunk()
551 r = radeon_bo_vm_update_pte(parser, vm); radeon_cs_ib_vm_chunk()
571 mutex_unlock(&vm->mutex); radeon_cs_ib_vm_chunk()
588 struct radeon_vm *vm = NULL; radeon_cs_ib_fill() local
596 vm = &fpriv->vm; radeon_cs_ib_fill()
606 vm, ib_chunk->length_dw * 4); radeon_cs_ib_fill()
628 vm, ib_chunk->length_dw * 4); radeon_cs_ib_fill()
478 radeon_bo_vm_update_pte(struct radeon_cs_parser *p, struct radeon_vm *vm) radeon_bo_vm_update_pte() argument
radeon_gem.c
146 struct radeon_vm *vm = &fpriv->vm; radeon_gem_object_open() local
160 bo_va = radeon_vm_bo_find(vm, rbo); radeon_gem_object_open()
162 bo_va = radeon_vm_bo_add(rdev, vm, rbo); radeon_gem_object_open()
177 struct radeon_vm *vm = &fpriv->vm; radeon_gem_object_close() local
192 bo_va = radeon_vm_bo_find(vm, rbo); radeon_gem_object_close()
550 vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list); radeon_gem_va_update_vm()
566 mutex_lock(&bo_va->vm->mutex); radeon_gem_va_update_vm()
567 r = radeon_vm_clear_freed(rdev, bo_va->vm); radeon_gem_va_update_vm()
575 mutex_unlock(&bo_va->vm->mutex); radeon_gem_va_update_vm()
658 bo_va = radeon_vm_bo_find(&fpriv->vm, rbo); radeon_gem_va_ioctl()
radeon_kms.c
389 /* this is where we report if vm is supported or not */ radeon_info_ioctl()
395 /* this is where we report if vm is supported or not */ radeon_info_ioctl()
615 * On device open, init vm on cayman+ (all asics).
632 struct radeon_vm *vm; radeon_driver_open_kms() local
641 vm = &fpriv->vm; radeon_driver_open_kms()
642 r = radeon_vm_init(rdev, vm); radeon_driver_open_kms()
650 radeon_vm_fini(rdev, vm); radeon_driver_open_kms()
657 vm->ib_bo_va = radeon_vm_bo_add(rdev, vm, radeon_driver_open_kms()
659 r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va, radeon_driver_open_kms()
664 radeon_vm_fini(rdev, vm); radeon_driver_open_kms()
683 * On device post close, tear down vm on cayman+ (all asics).
693 struct radeon_vm *vm = &fpriv->vm; radeon_driver_postclose_kms() local
699 if (vm->ib_bo_va) radeon_driver_postclose_kms()
700 radeon_vm_bo_rmv(rdev, vm->ib_bo_va); radeon_driver_postclose_kms()
703 radeon_vm_fini(rdev, vm); radeon_driver_postclose_kms()
radeon.h
466 /* bo virtual address in a specific vm */
475 /* protected by vm mutex */
480 struct radeon_vm *vm; member in struct:radeon_bo_va
843 struct radeon_vm *vm; member in struct:radeon_ib
965 /* is vm enabled? */
975 struct radeon_vm vm; member in struct:radeon_fpriv
1018 struct radeon_ib *ib, struct radeon_vm *vm,
1890 } vm; member in struct:radeon_asic
2883 #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
2884 #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
2885 #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
2886 #define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
2887 #define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
2888 #define radeon_asic_vm_pad_ib(rdev, ib) ((rdev)->asic->vm.pad_ib((ib)))
3001 * vm
3005 int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
3006 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
3008 struct radeon_vm *vm,
3011 struct radeon_vm *vm, int ring);
3013 struct radeon_vm *vm,
3016 struct radeon_vm *vm,
3020 struct radeon_vm *vm);
3022 struct radeon_vm *vm);
3024 struct radeon_vm *vm);
3030 struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
3033 struct radeon_vm *vm,
ni_dma.c
126 unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0; cayman_dma_ring_ib_execute()
radeon_asic.c
1677 .vm = {
1782 .vm = {
1917 .vm = {
2084 .vm = {
2197 .vm = {
ni.c
1300 * on the fly in the vm part of radeon_gart.c cayman_pcie_gart_enable()
1409 unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0; cayman_ring_ib_execute()
2129 dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r); cayman_startup()
2340 * vm
2522 * cayman_vm_flush - vm flush using the CP
cik_sdma.c
137 u32 extra_bits = (ib->vm ? ib->vm->ids[ib->ring].id : 0) & 0xf; cik_sdma_ring_ib_execute()
935 * cik_dma_vm_flush - cik vm flush using sDMA
cik.c
4119 unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0; cik_ring_ib_execute()
5776 * by the radeon vm/hsa code.
5872 /* set vm size, must be a multiple of 4 */ cik_pcie_gart_enable()
5979 * cik_pcie_gart_fini - vm fini callback
5992 /* vm parser */
5994 * cik_ib_parse - vm ib_parse callback
6007 * vm
6010 * by the radeon vm/hsa code.
6013 * cik_vm_init - cik vm init callback
6017 * Inits cik specific vm parameters (number of VMs, base of vram for
6042 * cik_vm_fini - cik vm fini callback
6082 * cik_vm_flush - cik vm flush using the CP
8711 dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r); cik_startup()
/linux-4.1.27/arch/mips/math-emu/
ieee754int.h
58 #define EXPLODESP(v, vc, vs, ve, vm) \
62 vm = SPMANT(v); \
64 if (vm == 0) \
66 else if (vm & SP_MBIT(SP_FBITS-1)) \
71 if (vm) { \
78 vm |= SP_HIDDEN_BIT; \
92 #define EXPLODEDP(v, vc, vs, ve, vm) \
94 vm = DPMANT(v); \
98 if (vm == 0) \
100 else if (vm & DP_MBIT(DP_FBITS-1)) \
105 if (vm) { \
112 vm |= DP_HIDDEN_BIT; \
119 #define FLUSHDP(v, vc, vs, ve, vm) \
125 vm = 0; \
130 #define FLUSHSP(v, vc, vs, ve, vm) \
136 vm = 0; \
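
Here vm is a float mantissa, not a memory manager: EXPLODESP/EXPLODEDP split a raw IEEE-754 value into sign (vs), biased exponent (ve), and mantissa (vm), OR-ing in the hidden bit for normal numbers. A standalone userspace analogue of the single-precision case:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		float f = -1.5f;
		uint32_t v;

		memcpy(&v, &f, sizeof(v));	/* raw bit pattern */
		uint32_t vs = v >> 31;		/* sign */
		uint32_t ve = (v >> 23) & 0xff;	/* biased exponent */
		uint32_t vm = v & 0x7fffff;	/* 23-bit fraction */

		if (ve != 0 && ve != 0xff)	/* normal number */
			vm |= 1u << 23;		/* restore SP_HIDDEN_BIT */

		/* prints: s=1 e=127 m=0xc00000 */
		printf("s=%u e=%u m=0x%06x\n", vs, ve, vm);
		return 0;
	}
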
/linux-4.1.27/drivers/video/fbdev/omap2/dss/
display.c
270 void videomode_to_omap_video_timings(const struct videomode *vm, videomode_to_omap_video_timings() argument
275 ovt->pixelclock = vm->pixelclock; videomode_to_omap_video_timings()
276 ovt->x_res = vm->hactive; videomode_to_omap_video_timings()
277 ovt->hbp = vm->hback_porch; videomode_to_omap_video_timings()
278 ovt->hfp = vm->hfront_porch; videomode_to_omap_video_timings()
279 ovt->hsw = vm->hsync_len; videomode_to_omap_video_timings()
280 ovt->y_res = vm->vactive; videomode_to_omap_video_timings()
281 ovt->vbp = vm->vback_porch; videomode_to_omap_video_timings()
282 ovt->vfp = vm->vfront_porch; videomode_to_omap_video_timings()
283 ovt->vsw = vm->vsync_len; videomode_to_omap_video_timings()
285 ovt->vsync_level = vm->flags & DISPLAY_FLAGS_VSYNC_HIGH ? videomode_to_omap_video_timings()
288 ovt->hsync_level = vm->flags & DISPLAY_FLAGS_HSYNC_HIGH ? videomode_to_omap_video_timings()
291 ovt->de_level = vm->flags & DISPLAY_FLAGS_DE_HIGH ? videomode_to_omap_video_timings()
294 ovt->data_pclk_edge = vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE ? videomode_to_omap_video_timings()
303 struct videomode *vm) omap_video_timings_to_videomode()
305 memset(vm, 0, sizeof(*vm)); omap_video_timings_to_videomode()
307 vm->pixelclock = ovt->pixelclock; omap_video_timings_to_videomode()
309 vm->hactive = ovt->x_res; omap_video_timings_to_videomode()
310 vm->hback_porch = ovt->hbp; omap_video_timings_to_videomode()
311 vm->hfront_porch = ovt->hfp; omap_video_timings_to_videomode()
312 vm->hsync_len = ovt->hsw; omap_video_timings_to_videomode()
313 vm->vactive = ovt->y_res; omap_video_timings_to_videomode()
314 vm->vback_porch = ovt->vbp; omap_video_timings_to_videomode()
315 vm->vfront_porch = ovt->vfp; omap_video_timings_to_videomode()
316 vm->vsync_len = ovt->vsw; omap_video_timings_to_videomode()
319 vm->flags |= DISPLAY_FLAGS_HSYNC_HIGH; omap_video_timings_to_videomode()
321 vm->flags |= DISPLAY_FLAGS_HSYNC_LOW; omap_video_timings_to_videomode()
324 vm->flags |= DISPLAY_FLAGS_VSYNC_HIGH; omap_video_timings_to_videomode()
326 vm->flags |= DISPLAY_FLAGS_VSYNC_LOW; omap_video_timings_to_videomode()
329 vm->flags |= DISPLAY_FLAGS_DE_HIGH; omap_video_timings_to_videomode()
331 vm->flags |= DISPLAY_FLAGS_DE_LOW; omap_video_timings_to_videomode()
334 vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE; omap_video_timings_to_videomode()
336 vm->flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE; omap_video_timings_to_videomode()
302 omap_video_timings_to_videomode(const struct omap_video_timings *ovt, struct videomode *vm) omap_video_timings_to_videomode() argument
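
The display.c hits are field-by-field converters between struct videomode and omap_video_timings, in both directions. A hedged round-trip sketch; example_round_trip() and the timing values are illustrative only:

	/* Sketch only: round-trip a 1080p-like timing through the two
	 * converters shown above. */
	static void example_round_trip(void)
	{
		struct videomode vm = {
			.pixelclock = 148500000,
			.hactive = 1920, .hfront_porch = 88,
			.hback_porch = 148, .hsync_len = 44,
			.vactive = 1080, .vfront_porch = 4,
			.vback_porch = 36, .vsync_len = 5,
			.flags = DISPLAY_FLAGS_HSYNC_HIGH |
				 DISPLAY_FLAGS_VSYNC_HIGH,
		};
		struct omap_video_timings ovt;
		struct videomode back;

		videomode_to_omap_video_timings(&vm, &ovt); /* vm -> ovt */
		omap_video_timings_to_videomode(&ovt, &back); /* and back */
		/* back now matches vm for every field both structs carry;
		 * only the sync/de/pixdata polarity flags survive in ovt */
	}
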
dsi.c
4400 struct omap_video_timings vm = { 0 }; print_dsi_dispc_vm() local
4411 vm.pixelclock = pck; print_dsi_dispc_vm()
4412 vm.hsw = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk); print_dsi_dispc_vm()
4413 vm.hbp = div64_u64((u64)t->hbp * pck, byteclk); print_dsi_dispc_vm()
4414 vm.hfp = div64_u64((u64)t->hfp * pck, byteclk); print_dsi_dispc_vm()
4415 vm.x_res = t->hact; print_dsi_dispc_vm()
4417 print_dispc_vm(str, &vm); print_dsi_dispc_vm()
/linux-4.1.27/drivers/gpu/drm/i915/
i915_trace.h
116 __field(struct i915_address_space *, vm)
124 __entry->vm = vma->vm;
130 TP_printk("obj=%p, offset=%016llx size=%x%s vm=%p",
133 __entry->vm)
142 __field(struct i915_address_space *, vm)
149 __entry->vm = vma->vm;
154 TP_printk("obj=%p, offset=%016llx size=%x vm=%p",
155 __entry->obj, __entry->offset, __entry->size, __entry->vm)
158 #define VM_TO_TRACE_NAME(vm) \
159 (i915_is_ggtt(vm) ? "G" : \
163 TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
164 TP_ARGS(vm, start, length, name),
167 __field(struct i915_address_space *, vm)
174 __entry->vm = vm;
180 TP_printk("vm=%p (%s), 0x%llx-0x%llx",
181 __entry->vm, __get_str(name), __entry->start, __entry->end)
185 TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
186 TP_ARGS(vm, start, length, name)
190 TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
191 TP_ARGS(vm, pde, start, pde_shift),
194 __field(struct i915_address_space *, vm)
201 __entry->vm = vm;
207 TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)",
208 __entry->vm, __entry->pde, __entry->start, __entry->end)
212 TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
213 TP_ARGS(vm, pde, start, pde_shift)
222 TP_PROTO(struct i915_address_space *vm, u32 pde,
224 TP_ARGS(vm, pde, pt, first, count, bits),
227 __field(struct i915_address_space *, vm)
235 __entry->vm = vm;
246 TP_printk("vm=%p, pde=%d, updating %u:%u\t%s",
247 __entry->vm, __entry->pde, __entry->last, __entry->first,
252 TP_PROTO(struct i915_address_space *vm, u32 pde,
254 TP_ARGS(vm, pde, pt, first, count, bits)
410 TP_PROTO(struct i915_address_space *vm),
411 TP_ARGS(vm),
415 __field(struct i915_address_space *, vm)
419 __entry->dev = vm->dev->primary->index;
420 __entry->vm = vm;
423 TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
712 * These traces identify the ppgtt through the vm pointer, which is also printed
716 TP_PROTO(struct i915_address_space *vm),
717 TP_ARGS(vm),
720 __field(struct i915_address_space *, vm)
725 __entry->vm = vm;
726 __entry->dev = vm->dev->primary->index;
729 TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
733 TP_PROTO(struct i915_address_space *vm),
734 TP_ARGS(vm)
738 TP_PROTO(struct i915_address_space *vm),
739 TP_ARGS(vm)
746 * If full ppgtt is enabled, they also print the address of the vm assigned to
756 __field(struct i915_address_space *, vm)
761 __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
766 __entry->dev, __entry->ctx, __entry->vm)
783 * in the lifetime of the vm in the legacy submission path. This tracepoint is
794 __field(struct i915_address_space *, vm)
801 __entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
806 __entry->dev, __entry->ring, __entry->to, __entry->vm)
i915_gem_evict.c
52 * @vm: address space to evict from
74 i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, i915_gem_evict_something() argument
110 if (start != 0 || end != vm->total) { i915_gem_evict_something()
111 drm_mm_init_scan_with_range(&vm->mm, min_size, i915_gem_evict_something()
115 drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); i915_gem_evict_something()
119 list_for_each_entry(vma, &vm->inactive_list, mm_list) { i915_gem_evict_something()
128 list_for_each_entry(vma, &vm->active_list, mm_list) { i915_gem_evict_something()
203 * i915_gem_evict_vm - Evict all idle vmas from a vm
204 * @vm: Address space to cleanse
207 * This function evicts all idles vmas from a vm. If all unpinned vmas should be
216 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle) i915_gem_evict_vm() argument
221 WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex)); i915_gem_evict_vm()
222 trace_i915_gem_evict_vm(vm); i915_gem_evict_vm()
225 ret = i915_gpu_idle(vm->dev); i915_gem_evict_vm()
229 i915_gem_retire_requests(vm->dev); i915_gem_evict_vm()
231 WARN_ON(!list_empty(&vm->active_list)); i915_gem_evict_vm()
234 list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list) i915_gem_evict_vm()
253 struct i915_address_space *vm, *v; i915_gem_evict_everything() local
257 list_for_each_entry(vm, &dev_priv->vm_list, global_link) { i915_gem_evict_everything()
258 lists_empty = (list_empty(&vm->inactive_list) && i915_gem_evict_everything()
259 list_empty(&vm->active_list)); i915_gem_evict_everything()
280 list_for_each_entry_safe(vm, v, &dev_priv->vm_list, global_link) i915_gem_evict_everything()
281 WARN_ON(i915_gem_evict_vm(vm, false)); i915_gem_evict_everything()
i915_gem_gtt.c
482 static void gen8_ppgtt_clear_range(struct i915_address_space *vm, gen8_ppgtt_clear_range() argument
488 container_of(vm, struct i915_hw_ppgtt, base); gen8_ppgtt_clear_range()
542 static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, gen8_ppgtt_insert_entries() argument
548 container_of(vm, struct i915_hw_ppgtt, base); gen8_ppgtt_insert_entries()
652 static void gen8_ppgtt_cleanup(struct i915_address_space *vm) gen8_ppgtt_cleanup() argument
655 container_of(vm, struct i915_hw_ppgtt, base); gen8_ppgtt_cleanup()
862 struct i915_address_space *vm = &ppgtt->base; gen6_dump_ppgtt() local
868 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0); gen6_dump_ppgtt()
873 seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm, gen6_dump_ppgtt()
1094 static void gen6_ppgtt_clear_range(struct i915_address_space *vm, gen6_ppgtt_clear_range() argument
1100 container_of(vm, struct i915_hw_ppgtt, base); gen6_ppgtt_clear_range()
1108 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0); gen6_ppgtt_clear_range()
1128 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, gen6_ppgtt_insert_entries() argument
1134 container_of(vm, struct i915_hw_ppgtt, base); gen6_ppgtt_insert_entries()
1147 vm->pte_encode(sg_page_iter_dma_address(&sg_iter), gen6_ppgtt_insert_entries()
1167 /* If current vm != vm, */ mark_tlbs_dirty()
1171 static void gen6_initialize_pt(struct i915_address_space *vm, gen6_initialize_pt() argument
1177 WARN_ON(vm->scratch.addr == 0); gen6_initialize_pt()
1179 scratch_pte = vm->pte_encode(vm->scratch.addr, gen6_initialize_pt()
1190 static int gen6_alloc_va_range(struct i915_address_space *vm, gen6_alloc_va_range() argument
1194 struct drm_device *dev = vm->dev; gen6_alloc_va_range()
1197 container_of(vm, struct i915_hw_ppgtt, base); gen6_alloc_va_range()
1227 gen6_initialize_pt(vm, pt); gen6_alloc_va_range()
1231 trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT); gen6_alloc_va_range()
1247 trace_i915_page_table_entry_map(vm, pde, pt, gen6_alloc_va_range()
1269 unmap_and_free_pt(pt, vm->dev); for_each_set_bit()
1291 static void gen6_ppgtt_cleanup(struct i915_address_space *vm) gen6_ppgtt_cleanup() argument
1294 container_of(vm, struct i915_hw_ppgtt, base); gen6_ppgtt_cleanup()
1547 vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start, ppgtt_bind_vma()
1553 vma->vm->clear_range(vma->vm, ppgtt_unbind_vma()
1660 struct i915_address_space *vm; i915_gem_restore_gtt_mappings() local
1699 list_for_each_entry(vm, &dev_priv->vm_list, global_link) { i915_gem_restore_gtt_mappings()
1703 container_of(vm, struct i915_hw_ppgtt, i915_gem_restore_gtt_mappings()
1706 if (i915_is_ggtt(vm)) i915_gem_restore_gtt_mappings()
1740 static void gen8_ggtt_insert_entries(struct i915_address_space *vm, gen8_ggtt_insert_entries() argument
1745 struct drm_i915_private *dev_priv = vm->dev->dev_private; gen8_ggtt_insert_entries()
1786 static void gen6_ggtt_insert_entries(struct i915_address_space *vm, gen6_ggtt_insert_entries() argument
1791 struct drm_i915_private *dev_priv = vm->dev->dev_private; gen6_ggtt_insert_entries()
1801 iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]); gen6_ggtt_insert_entries()
1813 WARN_ON(gtt != vm->pte_encode(addr, level, true, flags)); gen6_ggtt_insert_entries()
1824 static void gen8_ggtt_clear_range(struct i915_address_space *vm, gen8_ggtt_clear_range() argument
1829 struct drm_i915_private *dev_priv = vm->dev->dev_private; gen8_ggtt_clear_range()
1842 scratch_pte = gen8_pte_encode(vm->scratch.addr, gen8_ggtt_clear_range()
1850 static void gen6_ggtt_clear_range(struct i915_address_space *vm, gen6_ggtt_clear_range() argument
1855 struct drm_i915_private *dev_priv = vm->dev->dev_private; gen6_ggtt_clear_range()
1868 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0); gen6_ggtt_clear_range()
1884 BUG_ON(!i915_is_ggtt(vma->vm)); i915_ggtt_bind_vma()
1889 static void i915_ggtt_clear_range(struct i915_address_space *vm, i915_ggtt_clear_range() argument
1904 BUG_ON(!i915_is_ggtt(vma->vm)); i915_ggtt_unbind_vma()
1913 struct drm_device *dev = vma->vm->dev; ggtt_bind_vma()
1922 if (i915_is_ggtt(vma->vm)) ggtt_bind_vma()
1939 vma->vm->insert_entries(vma->vm, pages, ggtt_bind_vma()
1959 struct drm_device *dev = vma->vm->dev; ggtt_unbind_vma()
1964 vma->vm->clear_range(vma->vm, ggtt_unbind_vma()
2112 struct i915_address_space *vm = &dev_priv->gtt.base; i915_global_gtt_cleanup() local
2120 if (drm_mm_initialized(&vm->mm)) { i915_global_gtt_cleanup()
2124 drm_mm_takedown(&vm->mm); i915_global_gtt_cleanup()
2125 list_del(&vm->global_link); i915_global_gtt_cleanup()
2128 vm->cleanup(vm); i915_global_gtt_cleanup()
2431 static void gen6_gmch_remove(struct i915_address_space *vm) gen6_gmch_remove() argument
2434 struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base); gen6_gmch_remove()
2437 teardown_scratch_page(vm->dev); gen6_gmch_remove()
2466 static void i915_gmch_remove(struct i915_address_space *vm) i915_gmch_remove() argument
2528 struct i915_address_space *vm, __i915_gem_vma_create()
2533 if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view)) __i915_gem_vma_create()
2542 vma->vm = vm; __i915_gem_vma_create()
2545 if (INTEL_INFO(vm->dev)->gen >= 6) { __i915_gem_vma_create()
2546 if (i915_is_ggtt(vm)) { __i915_gem_vma_create()
2556 BUG_ON(!i915_is_ggtt(vm)); __i915_gem_vma_create()
2563 if (!i915_is_ggtt(vm)) __i915_gem_vma_create()
2564 i915_ppgtt_get(i915_vm_to_ppgtt(vm)); __i915_gem_vma_create()
2571 struct i915_address_space *vm) i915_gem_obj_lookup_or_create_vma()
2575 vma = i915_gem_obj_to_vma(obj, vm); i915_gem_obj_lookup_or_create_vma()
2577 vma = __i915_gem_vma_create(obj, vm, i915_gem_obj_lookup_or_create_vma()
2578 i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL); i915_gem_obj_lookup_or_create_vma()
2749 if (i915_is_ggtt(vma->vm)) { i915_vma_bind()
2527 __i915_gem_vma_create(struct drm_i915_gem_object *obj, struct i915_address_space *vm, const struct i915_ggtt_view *ggtt_view) __i915_gem_vma_create() argument
2570 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, struct i915_address_space *vm) i915_gem_obj_lookup_or_create_vma() argument
i915_gpu_error.c
404 err_printf(m, "vm[%d]\n", i); i915_error_state_to_str()
579 struct i915_address_space *vm) i915_error_object_create()
597 if (i915_gem_obj_bound(src, vm)) i915_error_object_create()
598 dst->gtt_offset = i915_gem_obj_offset(src, vm); i915_error_object_create()
603 if (i915_is_ggtt(vm)) i915_error_object_create()
720 struct i915_address_space *vm) capture_pinned_bo()
733 if (vma->vm == vm && vma->pin_count > 0) list_for_each_entry()
989 struct i915_address_space *vm; i915_gem_record_rings() local
991 vm = request->ctx && request->ctx->ppgtt ? i915_gem_record_rings()
1002 vm); i915_gem_record_rings()
1076 struct i915_address_space *vm, i915_gem_capture_vm()
1085 list_for_each_entry(vma, &vm->active_list, mm_list) i915_gem_capture_vm()
1091 if (vma->vm == vm && vma->pin_count > 0) i915_gem_capture_vm()
1106 &vm->active_list); i915_gem_capture_vm()
1112 &dev_priv->mm.bound_list, vm); i915_gem_capture_vm()
1120 struct i915_address_space *vm; i915_gem_capture_buffers() local
1123 list_for_each_entry(vm, &dev_priv->vm_list, global_link) i915_gem_capture_buffers()
1147 list_for_each_entry(vm, &dev_priv->vm_list, global_link) i915_gem_capture_buffers()
1148 i915_gem_capture_vm(dev_priv, error, vm, i++); i915_gem_capture_buffers()
577 i915_error_object_create(struct drm_i915_private *dev_priv, struct drm_i915_gem_object *src, struct i915_address_space *vm) i915_error_object_create() argument
718 capture_pinned_bo(struct drm_i915_error_buffer *err, int count, struct list_head *head, struct i915_address_space *vm) capture_pinned_bo() argument
1074 i915_gem_capture_vm(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error, struct i915_address_space *vm, const int ndx) i915_gem_capture_vm() argument
i915_gem.c
2215 list_move_tail(&vma->mm_list, &vma->vm->active_list); i915_vma_move_to_active()
2229 list_move_tail(&vma->mm_list, &vma->vm->inactive_list); i915_gem_object_move_to_inactive()
2986 if (i915_is_ggtt(vma->vm) && i915_vma_unbind()
3001 if (i915_is_ggtt(vma->vm)) { i915_vma_unbind()
3402 * domains and dying. During vm initialisation, we decide whether or not i915_gem_valid_gtt_space()
3406 if (vma->vm->mm.color_adjust == NULL) i915_gem_valid_gtt_space()
3431 struct i915_address_space *vm, i915_gem_object_bind_to_vm()
3442 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total; i915_gem_object_bind_to_vm()
3446 if(WARN_ON(i915_is_ggtt(vm) != !!ggtt_view)) i915_gem_object_bind_to_vm()
3488 i915_gem_obj_lookup_or_create_vma(obj, vm); i915_gem_object_bind_to_vm()
3494 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, i915_gem_object_bind_to_vm()
3501 ret = i915_gem_evict_something(dev, vm, size, alignment, i915_gem_object_bind_to_vm()
3520 if (vma->vm->allocate_va_range) { i915_gem_object_bind_to_vm()
3521 trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size, i915_gem_object_bind_to_vm()
3522 VM_TO_TRACE_NAME(vma->vm)); i915_gem_object_bind_to_vm()
3523 ret = vma->vm->allocate_va_range(vma->vm, i915_gem_object_bind_to_vm()
3537 list_add_tail(&vma->mm_list, &vm->inactive_list); i915_gem_object_bind_to_vm()
4104 struct i915_address_space *vm, i915_gem_object_do_pin()
4114 if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base)) i915_gem_object_do_pin()
4117 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm))) i915_gem_object_do_pin()
4123 if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view)) i915_gem_object_do_pin()
4127 i915_gem_obj_to_vma(obj, vm); i915_gem_object_do_pin()
4139 i915_gem_obj_offset(obj, vm); i915_gem_object_do_pin()
4163 vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment, i915_gem_object_do_pin()
4207 struct i915_address_space *vm, i915_gem_object_pin()
4211 return i915_gem_object_do_pin(obj, vm, i915_gem_object_pin()
4212 i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL, i915_gem_object_pin()
4536 struct i915_address_space *vm) i915_gem_obj_to_vma()
4540 if (i915_is_ggtt(vma->vm) && i915_gem_obj_to_vma()
4543 if (vma->vm == vm) i915_gem_obj_to_vma()
4559 if (vma->vm == ggtt && i915_gem_obj_to_ggtt_view()
4567 struct i915_address_space *vm = NULL; i915_gem_vma_destroy() local
4574 vm = vma->vm; i915_gem_vma_destroy()
4576 if (!i915_is_ggtt(vm)) i915_gem_vma_destroy()
4577 i915_ppgtt_put(i915_vm_to_ppgtt(vm)); i915_gem_vma_destroy()
4938 struct i915_address_space *vm) i915_init_vm()
4940 if (!i915_is_ggtt(vm)) i915_init_vm()
4941 drm_mm_init(&vm->mm, vm->start, vm->total); i915_init_vm()
4942 vm->dev = dev_priv->dev; i915_init_vm()
4943 INIT_LIST_HEAD(&vm->active_list); i915_init_vm()
4944 INIT_LIST_HEAD(&vm->inactive_list); i915_init_vm()
4945 INIT_LIST_HEAD(&vm->global_link); i915_init_vm()
4946 list_add_tail(&vm->global_link, &dev_priv->vm_list); i915_init_vm()
5095 struct i915_address_space *vm) i915_gem_obj_offset()
5100 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); i915_gem_obj_offset()
5103 if (i915_is_ggtt(vma->vm) && i915_gem_obj_offset()
5106 if (vma->vm == vm) i915_gem_obj_offset()
5111 i915_is_ggtt(vm) ? "global" : "ppgtt"); i915_gem_obj_offset()
5123 if (vma->vm == ggtt && i915_gem_obj_ggtt_offset_view()
5132 struct i915_address_space *vm) i915_gem_obj_bound()
5137 if (i915_is_ggtt(vma->vm) && i915_gem_obj_bound()
5140 if (vma->vm == vm && drm_mm_node_allocated(&vma->node)) i915_gem_obj_bound()
5154 if (vma->vm == ggtt && i915_gem_obj_ggtt_bound_view()
5174 struct i915_address_space *vm) i915_gem_obj_size()
5179 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); i915_gem_obj_size()
5184 if (i915_is_ggtt(vma->vm) && i915_gem_obj_size()
5187 if (vma->vm == vm) i915_gem_obj_size()
5197 if (i915_is_ggtt(vma->vm) && i915_gem_obj_is_pinned()
3430 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, struct i915_address_space *vm, const struct i915_ggtt_view *ggtt_view, unsigned alignment, uint64_t flags) i915_gem_object_bind_to_vm() argument
4103 i915_gem_object_do_pin(struct drm_i915_gem_object *obj, struct i915_address_space *vm, const struct i915_ggtt_view *ggtt_view, uint32_t alignment, uint64_t flags) i915_gem_object_do_pin() argument
4206 i915_gem_object_pin(struct drm_i915_gem_object *obj, struct i915_address_space *vm, uint32_t alignment, uint64_t flags) i915_gem_object_pin() argument
4535 i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, struct i915_address_space *vm) i915_gem_obj_to_vma() argument
4937 i915_init_vm(struct drm_i915_private *dev_priv, struct i915_address_space *vm) i915_init_vm() argument
5094 i915_gem_obj_offset(struct drm_i915_gem_object *o, struct i915_address_space *vm) i915_gem_obj_offset() argument
5131 i915_gem_obj_bound(struct drm_i915_gem_object *o, struct i915_address_space *vm) i915_gem_obj_bound() argument
5173 i915_gem_obj_size(struct drm_i915_gem_object *o, struct i915_address_space *vm) i915_gem_obj_size() argument
i915_gem_execbuffer.c
96 struct i915_address_space *vm, eb_lookup_vmas()
143 * from the (obj, vm) we don't run the risk of creating eb_lookup_vmas()
144 * duplicated vmas for the same vm. eb_lookup_vmas()
146 vma = i915_gem_obj_lookup_or_create_vma(obj, vm); eb_lookup_vmas()
604 ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags); i915_gem_execbuffer_reserve_vma()
607 ret = i915_gem_object_pin(obj, vma->vm, i915_gem_execbuffer_reserve_vma()
645 if (!i915_is_ggtt(vma->vm)) need_reloc_mappable()
665 !i915_is_ggtt(vma->vm)); eb_vma_misplaced()
689 struct i915_address_space *vm; i915_gem_execbuffer_reserve() local
696 vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm; i915_gem_execbuffer_reserve()
772 ret = i915_gem_evict_vm(vm, true);
787 struct i915_address_space *vm; i915_gem_execbuffer_relocate_slow() local
794 vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm; i915_gem_execbuffer_relocate_slow()
865 ret = eb_lookup_vmas(eb, exec, args, vm, file); i915_gem_execbuffer_relocate_slow()
1413 struct i915_address_space *vm; i915_gem_do_execbuffer() local
1504 vm = &ctx->ppgtt->base; i915_gem_do_execbuffer()
1506 vm = &dev_priv->gtt.base; i915_gem_do_execbuffer()
1517 ret = eb_lookup_vmas(eb, exec, args, vm, file); i915_gem_do_execbuffer()
1602 exec_start += i915_gem_obj_offset(batch_obj, vm); i915_gem_do_execbuffer()
93 eb_lookup_vmas(struct eb_vmas *eb, struct drm_i915_gem_exec_object2 *exec, const struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm, struct drm_file *file) eb_lookup_vmas() argument
i915_gem_gtt.h
156 struct i915_address_space *vm; member in struct:i915_vma
270 int (*allocate_va_range)(struct i915_address_space *vm,
273 void (*clear_range)(struct i915_address_space *vm,
277 void (*insert_entries)(struct i915_address_space *vm,
281 void (*cleanup)(struct i915_address_space *vm);
i915_drv.h
769 * @vm: virtual memory space used by this context.
2613 struct i915_address_space *vm);
2624 struct i915_address_space *vm,
2802 struct i915_address_space *vm);
2813 struct i915_address_space *vm);
2816 struct i915_address_space *vm);
2819 struct i915_address_space *vm);
2826 struct i915_address_space *vm);
2841 static inline bool i915_is_ggtt(struct i915_address_space *vm) i915_is_ggtt() argument
2844 &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base; i915_is_ggtt()
2845 return vm == ggtt; i915_is_ggtt()
2849 i915_vm_to_ppgtt(struct i915_address_space *vm) i915_vm_to_ppgtt() argument
2851 WARN_ON(i915_is_ggtt(vm)); i915_vm_to_ppgtt()
2853 return container_of(vm, struct i915_hw_ppgtt, base); i915_vm_to_ppgtt()
2931 struct i915_address_space *vm,
2938 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
i915_debugfs.c
152 if (!i915_is_ggtt(vma->vm)) describe_obj()
192 struct i915_address_space *vm = &dev_priv->gtt.base; i915_gem_object_list_info() local
205 head = &vm->active_list; i915_gem_object_list_info()
209 head = &vm->inactive_list; i915_gem_object_list_info()
331 if (i915_is_ggtt(vma->vm)) { per_file_stats()
336 ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base); per_file_stats()
410 struct i915_address_space *vm = &dev_priv->gtt.base; i915_gem_object_info() local
429 count_vmas(&vm->active_list, mm_list); i915_gem_object_info()
434 count_vmas(&vm->inactive_list, mm_list); i915_gem_object_info()
/linux-4.1.27/include/video/
of_videomode.h
15 int of_get_videomode(struct device_node *np, struct videomode *vm,
videomode.h
38 * @vm: return value
44 struct videomode *vm);
49 * @vm: return value
56 struct videomode *vm, unsigned int index);
omapdss.h
862 void videomode_to_omap_video_timings(const struct videomode *vm,
865 struct videomode *vm);
/linux-4.1.27/tools/testing/selftests/vm/
Makefile
1 # Makefile for vm selftests
hugetlbfstest.c
76 system("echo 100 > /proc/sys/vm/nr_hugepages"); main()
/linux-4.1.27/arch/arm/mm/
ioremap.c
49 struct vm_struct *vm; find_static_vm_paddr() local
52 vm = &svm->vm; find_static_vm_paddr()
53 if (!(vm->flags & VM_ARM_STATIC_MAPPING)) find_static_vm_paddr()
55 if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype)) find_static_vm_paddr()
58 if (vm->phys_addr > paddr || find_static_vm_paddr()
59 paddr + size - 1 > vm->phys_addr + vm->size - 1) find_static_vm_paddr()
71 struct vm_struct *vm; find_static_vm_vaddr() local
74 vm = &svm->vm; find_static_vm_vaddr()
77 if (vm->addr > vaddr) find_static_vm_vaddr()
80 if (vm->addr <= vaddr && vm->addr + vm->size > vaddr) find_static_vm_vaddr()
90 struct vm_struct *vm; add_static_vm_early() local
93 vm = &svm->vm; add_static_vm_early()
94 vm_area_add_early(vm); add_static_vm_early()
95 vaddr = vm->addr; add_static_vm_early()
98 vm = &curr_svm->vm; add_static_vm_early()
100 if (vm->addr > vaddr) add_static_vm_early()
292 addr = (unsigned long)svm->vm.addr; __arm_ioremap_pfn_caller()
293 addr += paddr - svm->vm.phys_addr; __arm_ioremap_pfn_caller()
415 struct vm_struct *vm; __iounmap() local
417 vm = find_vm_area(addr); __iounmap()
424 if (vm && (vm->flags & VM_ARM_SECTION_MAPPING)) __iounmap()
425 unmap_area_sections((unsigned long)vm->addr, vm->size); __iounmap()
mmu.c
904 struct vm_struct *vm; iotable_init() local
915 vm = &svm->vm; iotable_init()
916 vm->addr = (void *)(md->virtual & PAGE_MASK); iotable_init()
917 vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); iotable_init()
918 vm->phys_addr = __pfn_to_phys(md->pfn); iotable_init()
919 vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; iotable_init()
920 vm->flags |= VM_ARM_MTYPE(md->type); iotable_init()
921 vm->caller = iotable_init; iotable_init()
929 struct vm_struct *vm; vm_reserve_area_early() local
934 vm = &svm->vm; vm_reserve_area_early()
935 vm->addr = (void *)addr; vm_reserve_area_early()
936 vm->size = size; vm_reserve_area_early()
937 vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING; vm_reserve_area_early()
938 vm->caller = caller; vm_reserve_area_early()
953 * Let's avoid the issue by inserting dummy vm entries covering the unused
965 struct vm_struct *vm; fill_pmd_gaps() local
970 vm = &svm->vm; fill_pmd_gaps()
971 addr = (unsigned long)vm->addr; fill_pmd_gaps()
976 * Check if this vm starts on an odd section boundary. fill_pmd_gaps()
987 * Then check if this vm ends on an odd section boundary. fill_pmd_gaps()
991 addr += vm->size; fill_pmd_gaps()
998 /* no need to look at any vm entry until we hit the next PMD */ fill_pmd_gaps()
mm.h
75 struct vm_struct vm; member in struct:static_vm
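
The pattern across these arch/arm/mm hits: static mappings registered at boot live on a list of struct static_vm, and lookups accept an entry only if its flags match and it fully covers the requested range. A hedged sketch of the physical-address lookup; the list head parameter and the 'list' member name are assumptions:

	/* Sketch only: find a boot-time static mapping that covers
	 * [paddr, paddr + size), as find_static_vm_paddr() does. */
	static struct static_vm *example_find_static_vm(struct list_head *head,
							phys_addr_t paddr,
							size_t size)
	{
		struct static_vm *svm;

		list_for_each_entry(svm, head, list) {
			struct vm_struct *vm = &svm->vm;

			if (!(vm->flags & VM_ARM_STATIC_MAPPING))
				continue;
			/* must cover the whole requested physical range */
			if (vm->phys_addr > paddr ||
			    paddr + size - 1 > vm->phys_addr + vm->size - 1)
				continue;
			return svm;
		}
		return NULL;
	}
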
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/bar/
gf100.c
34 struct nvkm_vm *vm; member in struct:gf100_bar_priv_vm
50 ret = nvkm_vm_get(priv->bar[0].vm, mem->size << 12, 12, flags, vma); gf100_bar_kmap()
65 ret = nvkm_vm_get(priv->bar[1].vm, mem->size << 12, gf100_bar_umap()
86 struct nvkm_vm *vm; gf100_bar_ctor_vm() local
102 ret = nvkm_vm_new(device, 0, bar_len, 0, &vm); gf100_bar_ctor_vm()
106 atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]); gf100_bar_ctor_vm()
115 &vm->pgt[0].obj[0]); gf100_bar_ctor_vm()
116 vm->pgt[0].refcount[0] = 1; gf100_bar_ctor_vm()
121 ret = nvkm_vm_ref(vm, &bar_vm->vm, bar_vm->pgd); gf100_bar_ctor_vm()
122 nvkm_vm_ref(NULL, &vm, NULL); gf100_bar_ctor_vm()
176 nvkm_vm_ref(NULL, &priv->bar[1].vm, priv->bar[1].pgd); gf100_bar_dtor()
180 if (priv->bar[0].vm) { gf100_bar_dtor()
181 nvkm_gpuobj_ref(NULL, &priv->bar[0].vm->pgt[0].obj[0]); gf100_bar_dtor()
182 nvkm_vm_ref(NULL, &priv->bar[0].vm, priv->bar[0].pgd); gf100_bar_dtor()
nv50.c
112 struct nvkm_vm *vm; nv50_bar_ctor() local
142 ret = nvkm_vm_new(device, start, limit, start, &vm); nv50_bar_ctor()
146 atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]); nv50_bar_ctor()
150 NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]); nv50_bar_ctor()
151 vm->pgt[0].refcount[0] = 1; nv50_bar_ctor()
155 ret = nvkm_vm_ref(vm, &priv->bar3_vm, priv->pgd); nv50_bar_ctor()
156 nvkm_vm_ref(NULL, &vm, NULL); nv50_bar_ctor()
176 ret = nvkm_vm_new(device, start, limit--, start, &vm); nv50_bar_ctor()
180 atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]); nv50_bar_ctor()
182 ret = nvkm_vm_ref(vm, &priv->bar1_vm, priv->pgd); nv50_bar_ctor()
183 nvkm_vm_ref(NULL, &vm, NULL); nv50_bar_ctor()
242 nv_error(priv, "vm flush timeout\n"); nv50_bar_init()
/linux-4.1.27/security/
H A Dmin_addr.c6 /* amount of vm to protect from userspace access by both DAC and the LSM*/
8 /* amount of vm to protect from userspace using CAP_SYS_RAWIO (DAC) */
10 /* amount of vm to protect from userspace using the LSM = CONFIG_LSM_MMAP_MIN_ADDR */
/linux-4.1.27/tools/vm/
H A DMakefile1 # Makefile for vm tools
/linux-4.1.27/drivers/gpu/drm/atmel-hlcdc/
H A Datmel_hlcdc_crtc.c63 struct videomode vm; atmel_hlcdc_crtc_mode_set_nofb() local
68 vm.vfront_porch = adj->crtc_vsync_start - adj->crtc_vdisplay; atmel_hlcdc_crtc_mode_set_nofb()
69 vm.vback_porch = adj->crtc_vtotal - adj->crtc_vsync_end; atmel_hlcdc_crtc_mode_set_nofb()
70 vm.vsync_len = adj->crtc_vsync_end - adj->crtc_vsync_start; atmel_hlcdc_crtc_mode_set_nofb()
71 vm.hfront_porch = adj->crtc_hsync_start - adj->crtc_hdisplay; atmel_hlcdc_crtc_mode_set_nofb()
72 vm.hback_porch = adj->crtc_htotal - adj->crtc_hsync_end; atmel_hlcdc_crtc_mode_set_nofb()
73 vm.hsync_len = adj->crtc_hsync_end - adj->crtc_hsync_start; atmel_hlcdc_crtc_mode_set_nofb()
76 (vm.hsync_len - 1) | ((vm.vsync_len - 1) << 16)); atmel_hlcdc_crtc_mode_set_nofb()
79 (vm.vfront_porch - 1) | (vm.vback_porch << 16)); atmel_hlcdc_crtc_mode_set_nofb()
82 (vm.hfront_porch - 1) | ((vm.hback_porch - 1) << 16)); atmel_hlcdc_crtc_mode_set_nofb()
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/engine/fifo/
H A Dnv50.h17 struct nvkm_vm *vm; member in struct:nv50_fifo_base
H A Dnv50.c418 ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd); nv50_fifo_context_ctor()
429 nvkm_vm_ref(NULL, &base->vm, base->pgd); nv50_fifo_context_dtor()
H A Dgf100.c60 struct nvkm_vm *vm; member in struct:gf100_fifo_base
130 ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm, gf100_fifo_context_attach()
312 * FIFO context - instmem heap and vm setup
340 ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd); gf100_fifo_context_ctor()
351 nvkm_vm_ref(NULL, &base->vm, base->pgd); gf100_fifo_context_dtor()
H A Dgk104.c78 struct nvkm_vm *vm; member in struct:gk104_fifo_base
154 ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm, gk104_fifo_context_attach()
345 * FIFO context - instmem heap and vm setup
372 ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd); gk104_fifo_context_ctor()
383 nvkm_vm_ref(NULL, &base->vm, base->pgd); gk104_fifo_context_dtor()
H A Dg84.c387 ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd); g84_fifo_context_ctor()
/linux-4.1.27/include/linux/platform_data/
H A Dvideo-clcd-versatile.h18 static inline int versatile_clcd_mmap_dma(struct clcd_fb *fb, struct vm_area_struct *vm) versatile_clcd_mmap_dma() argument
/linux-4.1.27/arch/s390/kernel/
H A Dsysinfo.c210 if (info->vm[lvl].ext_name_encoding == 0) print_ext_name()
214 switch (info->vm[lvl].ext_name_encoding) { print_ext_name()
229 if (!memcmp(&info->vm[i].uuid, &NULL_UUID_BE, sizeof(uuid_be))) print_uuid()
231 seq_printf(m, "VM%02d UUID: %pUb\n", i, &info->vm[i].uuid); print_uuid()
241 EBCASC(info->vm[i].name, sizeof(info->vm[i].name)); stsi_3_2_2()
242 EBCASC(info->vm[i].cpi, sizeof(info->vm[i].cpi)); stsi_3_2_2()
244 seq_printf(m, "VM%02d Name: %-8.8s\n", i, info->vm[i].name); stsi_3_2_2()
245 seq_printf(m, "VM%02d Control Program: %-16.16s\n", i, info->vm[i].cpi); stsi_3_2_2()
246 seq_printf(m, "VM%02d Adjustment: %d\n", i, info->vm[i].caf); stsi_3_2_2()
247 seq_printf(m, "VM%02d CPUs Total: %d\n", i, info->vm[i].cpus_total); stsi_3_2_2()
248 seq_printf(m, "VM%02d CPUs Configured: %d\n", i, info->vm[i].cpus_configured); stsi_3_2_2()
249 seq_printf(m, "VM%02d CPUs Standby: %d\n", i, info->vm[i].cpus_standby); stsi_3_2_2()
250 seq_printf(m, "VM%02d CPUs Reserved: %d\n", i, info->vm[i].cpus_reserved); stsi_3_2_2()
H A Dlgr.c42 } vm[VM_LEVEL_MAX]; member in struct:lgr_info
104 cpascii(lgr_info->vm[i].name, si->vm[i].name, lgr_stsi_3_2_2()
105 sizeof(si->vm[i].name)); lgr_stsi_3_2_2()
106 cpascii(lgr_info->vm[i].cpi, si->vm[i].cpi, lgr_stsi_3_2_2()
107 sizeof(si->vm[i].cpi)); lgr_stsi_3_2_2()
H A Dearly.c174 /* re-setup boot command line with new ipl vm parms */ create_kernel_nss()
227 if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3)) detect_machine_type()
H A Dhead.S16 * 4) generate the vm reader ipl header, move the generated image to the
/linux-4.1.27/drivers/gpu/drm/exynos/
H A Dexynos_drm_dpi.c33 struct videomode *vm; member in struct:exynos_dpi
73 if (ctx->vm) { exynos_dpi_get_modes()
81 drm_display_mode_from_videomode(ctx->vm, mode); exynos_dpi_get_modes()
272 struct videomode *vm; exynos_dpi_parse_dt() local
277 vm = devm_kzalloc(dev, sizeof(*ctx->vm), GFP_KERNEL); exynos_dpi_parse_dt()
278 if (!vm) exynos_dpi_parse_dt()
281 ret = of_get_videomode(dn, vm, 0); exynos_dpi_parse_dt()
283 devm_kfree(dev, vm); exynos_dpi_parse_dt()
287 ctx->vm = vm; exynos_dpi_parse_dt()
H A Dexynos_drm_dsi.c292 struct videomode vm; member in struct:exynos_dsi
719 struct videomode *vm = &dsi->vm; exynos_dsi_set_display_mode() local
724 | DSIM_STABLE_VFP(vm->vfront_porch) exynos_dsi_set_display_mode()
725 | DSIM_MAIN_VBP(vm->vback_porch); exynos_dsi_set_display_mode()
728 reg = DSIM_MAIN_HFP(vm->hfront_porch) exynos_dsi_set_display_mode()
729 | DSIM_MAIN_HBP(vm->hback_porch); exynos_dsi_set_display_mode()
732 reg = DSIM_MAIN_VSA(vm->vsync_len) exynos_dsi_set_display_mode()
733 | DSIM_MAIN_HSA(vm->hsync_len); exynos_dsi_set_display_mode()
737 reg = DSIM_MAIN_HRESOL(vm->hactive) | DSIM_MAIN_VRESOL(vm->vactive); exynos_dsi_set_display_mode()
740 dev_dbg(dsi->dev, "LCD size = %dx%d\n", vm->hactive, vm->vactive); exynos_dsi_set_display_mode()
1517 struct videomode *vm = &dsi->vm; exynos_dsi_mode_set() local
1519 vm->hactive = mode->hdisplay; exynos_dsi_mode_set()
1520 vm->vactive = mode->vdisplay; exynos_dsi_mode_set()
1521 vm->vfront_porch = mode->vsync_start - mode->vdisplay; exynos_dsi_mode_set()
1522 vm->vback_porch = mode->vtotal - mode->vsync_end; exynos_dsi_mode_set()
1523 vm->vsync_len = mode->vsync_end - mode->vsync_start; exynos_dsi_mode_set()
1524 vm->hfront_porch = mode->hsync_start - mode->hdisplay; exynos_dsi_mode_set()
1525 vm->hback_porch = mode->htotal - mode->hsync_end; exynos_dsi_mode_set()
1526 vm->hsync_len = mode->hsync_end - mode->hsync_start; exynos_dsi_mode_set()
H A Dexynos_drm_gem.h147 /* set vm_flags and we can change the vm attribute to other one at here. */
H A Dexynos_dp_core.c976 drm_display_mode_from_videomode(&dp->priv.vm, mode); exynos_dp_get_modes()
1205 ret = of_get_videomode(dp->dev->of_node, &dp->priv.vm, exynos_dp_dt_parse_panel()
/linux-4.1.27/arch/um/include/asm/
H A Dtlbflush.h18 * - flush_tlb_kernel_vm() flushes the kernel vm area
H A Dprocessor-generic.h86 /* This decides where the kernel will search for a free chunk of vm
H A Dpage.h22 #include <asm/vm-flags.h>
/linux-4.1.27/drivers/gpu/drm/
H A Ddrm_modes.c578 * drm_display_mode_from_videomode - fill in @dmode using @vm,
579 * @vm: videomode structure to use as source
582 * Fills out @dmode using the display mode specified in @vm.
584 void drm_display_mode_from_videomode(const struct videomode *vm, drm_display_mode_from_videomode() argument
587 dmode->hdisplay = vm->hactive; drm_display_mode_from_videomode()
588 dmode->hsync_start = dmode->hdisplay + vm->hfront_porch; drm_display_mode_from_videomode()
589 dmode->hsync_end = dmode->hsync_start + vm->hsync_len; drm_display_mode_from_videomode()
590 dmode->htotal = dmode->hsync_end + vm->hback_porch; drm_display_mode_from_videomode()
592 dmode->vdisplay = vm->vactive; drm_display_mode_from_videomode()
593 dmode->vsync_start = dmode->vdisplay + vm->vfront_porch; drm_display_mode_from_videomode()
594 dmode->vsync_end = dmode->vsync_start + vm->vsync_len; drm_display_mode_from_videomode()
595 dmode->vtotal = dmode->vsync_end + vm->vback_porch; drm_display_mode_from_videomode()
597 dmode->clock = vm->pixelclock / 1000; drm_display_mode_from_videomode()
600 if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH) drm_display_mode_from_videomode()
602 else if (vm->flags & DISPLAY_FLAGS_HSYNC_LOW) drm_display_mode_from_videomode()
604 if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH) drm_display_mode_from_videomode()
606 else if (vm->flags & DISPLAY_FLAGS_VSYNC_LOW) drm_display_mode_from_videomode()
608 if (vm->flags & DISPLAY_FLAGS_INTERLACED) drm_display_mode_from_videomode()
610 if (vm->flags & DISPLAY_FLAGS_DOUBLESCAN) drm_display_mode_from_videomode()
612 if (vm->flags & DISPLAY_FLAGS_DOUBLECLK) drm_display_mode_from_videomode()
619 * drm_display_mode_to_videomode - fill in @vm using @dmode,
621 * @vm: videomode structure to use as destination
623 * Fills out @vm using the display mode specified in @dmode.
626 struct videomode *vm) drm_display_mode_to_videomode()
628 vm->hactive = dmode->hdisplay; drm_display_mode_to_videomode()
629 vm->hfront_porch = dmode->hsync_start - dmode->hdisplay; drm_display_mode_to_videomode()
630 vm->hsync_len = dmode->hsync_end - dmode->hsync_start; drm_display_mode_to_videomode()
631 vm->hback_porch = dmode->htotal - dmode->hsync_end; drm_display_mode_to_videomode()
633 vm->vactive = dmode->vdisplay; drm_display_mode_to_videomode()
634 vm->vfront_porch = dmode->vsync_start - dmode->vdisplay; drm_display_mode_to_videomode()
635 vm->vsync_len = dmode->vsync_end - dmode->vsync_start; drm_display_mode_to_videomode()
636 vm->vback_porch = dmode->vtotal - dmode->vsync_end; drm_display_mode_to_videomode()
638 vm->pixelclock = dmode->clock * 1000; drm_display_mode_to_videomode()
640 vm->flags = 0; drm_display_mode_to_videomode()
642 vm->flags |= DISPLAY_FLAGS_HSYNC_HIGH; drm_display_mode_to_videomode()
644 vm->flags |= DISPLAY_FLAGS_HSYNC_LOW; drm_display_mode_to_videomode()
646 vm->flags |= DISPLAY_FLAGS_VSYNC_HIGH; drm_display_mode_to_videomode()
648 vm->flags |= DISPLAY_FLAGS_VSYNC_LOW; drm_display_mode_to_videomode()
650 vm->flags |= DISPLAY_FLAGS_INTERLACED; drm_display_mode_to_videomode()
652 vm->flags |= DISPLAY_FLAGS_DOUBLESCAN; drm_display_mode_to_videomode()
654 vm->flags |= DISPLAY_FLAGS_DOUBLECLK; drm_display_mode_to_videomode()
675 struct videomode vm; of_get_drm_display_mode() local
678 ret = of_get_videomode(np, &vm, index); of_get_drm_display_mode()
682 drm_display_mode_from_videomode(&vm, dmode); of_get_drm_display_mode()
685 of_node_full_name(np), vm.hactive, vm.vactive, np->name); of_get_drm_display_mode()
625 drm_display_mode_to_videomode(const struct drm_display_mode *dmode, struct videomode *vm) drm_display_mode_to_videomode() argument
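drm_display_mode_from_videomode() and drm_display_mode_to_videomode() are exact inverses: horizontally, hsync_start = hactive + hfront_porch, hsync_end = hsync_start + hsync_len and htotal = hsync_end + hback_porch, with the vertical side identical and the clock scaled between Hz and kHz. A self-contained sketch of the forward conversion, using simplified stand-ins for the two structs and fed with CEA-861 1080p60 numbers:

#include <stdio.h>

/* Simplified stand-ins for struct videomode and struct drm_display_mode. */
struct vmode {
    unsigned long pixelclock;  /* Hz */
    unsigned int hactive, hfront_porch, hback_porch, hsync_len;
    unsigned int vactive, vfront_porch, vback_porch, vsync_len;
};

struct dmode {
    int clock;                 /* kHz, as in drm_display_mode */
    int hdisplay, hsync_start, hsync_end, htotal;
    int vdisplay, vsync_start, vsync_end, vtotal;
};

/* Same accumulation as the drm_display_mode_from_videomode() hits above. */
static void dmode_from_vmode(const struct vmode *vm, struct dmode *d)
{
    d->hdisplay    = vm->hactive;
    d->hsync_start = d->hdisplay    + vm->hfront_porch;
    d->hsync_end   = d->hsync_start + vm->hsync_len;
    d->htotal      = d->hsync_end   + vm->hback_porch;

    d->vdisplay    = vm->vactive;
    d->vsync_start = d->vdisplay    + vm->vfront_porch;
    d->vsync_end   = d->vsync_start + vm->vsync_len;
    d->vtotal      = d->vsync_end   + vm->vback_porch;

    d->clock = vm->pixelclock / 1000;
}

int main(void)
{
    /* CEA-861 1080p60 timings, for illustration. */
    struct vmode vm = {
        .pixelclock = 148500000,
        .hactive = 1920, .hfront_porch = 88, .hsync_len = 44, .hback_porch = 148,
        .vactive = 1080, .vfront_porch = 4,  .vsync_len = 5,  .vback_porch = 36,
    };
    struct dmode d;

    dmode_from_vmode(&vm, &d);
    printf("htotal=%d vtotal=%d clock=%d kHz\n", d.htotal, d.vtotal, d.clock);
    /* prints htotal=2200 vtotal=1125 clock=148500 kHz */
    return 0;
}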
H A Ddrm_info.c69 * Called when "/proc/dri/.../vm" is read.
H A Ddrm_debugfs.c49 {"vm", drm_vm_info, 0},
/linux-4.1.27/drivers/video/fbdev/
H A Dsh7760fb.c202 struct fb_videomode *vm = par->pd->def_mode; sh7760fb_set_par() local
212 if (par->rot && (vm->xres > 320)) { sh7760fb_set_par()
218 hsynp = vm->right_margin + vm->xres; sh7760fb_set_par()
219 hsynw = vm->hsync_len; sh7760fb_set_par()
220 htcn = vm->left_margin + hsynp + hsynw; sh7760fb_set_par()
221 hdcn = vm->xres; sh7760fb_set_par()
222 vsynp = vm->lower_margin + vm->yres; sh7760fb_set_par()
223 vsynw = vm->vsync_len; sh7760fb_set_par()
224 vtln = vm->upper_margin + vsynp + vsynw; sh7760fb_set_par()
225 vdln = vm->yres; sh7760fb_set_par()
244 if (!(vm->sync & FB_SYNC_HOR_HIGH_ACT)) sh7760fb_set_par()
246 if (!(vm->sync & FB_SYNC_VERT_HIGH_ACT)) sh7760fb_set_par()
H A Dpxa168fb.h388 #define CFG_GRA_VM_ENA(vm) ((vm) << 15) /* gfx */
390 #define CFG_DMA_VM_ENA(vm) ((vm) << 13) /* video */
392 #define CFG_CMD_VM_ENA(vm) ((vm) << 13)
H A Dmxsfb.c737 struct videomode vm; mxsfb_init_fbinfo_dt() local
779 ret = of_get_videomode(display_np, &vm, OF_USE_NATIVE_MODE); mxsfb_init_fbinfo_dt()
785 ret = fb_videomode_from_videomode(&vm, vmode); mxsfb_init_fbinfo_dt()
789 if (vm.flags & DISPLAY_FLAGS_DE_HIGH) mxsfb_init_fbinfo_dt()
791 if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE) mxsfb_init_fbinfo_dt()
H A Dmetronomefb.c442 /* copy from vm to metromem */ metronomefb_dpy_update()
457 /* swizzle from vm to metromem and recalc cksum at the same time*/ metronomefb_dpy_update_page()
600 info->screen_base which is vm, and is the fb used by apps. metronomefb_probe()
H A Datmel_lcdfb.c1132 struct videomode vm; atmel_lcdfb_of_init() local
1135 ret = videomode_from_timings(timings, &vm, i); atmel_lcdfb_of_init()
1138 ret = fb_videomode_from_videomode(&vm, &fb_vm); atmel_lcdfb_of_init()
H A Dtgafb.c422 min_diff = delta, vm = m, va = a, vr = r; \
430 int r,a,m,vm = 34, va = 1, vr = 30; tgafb_set_pll() local
496 TGA_WRITE_REG(par, (vm >> r) & 1, TGA_CLOCK_REG); tgafb_set_pll()
/linux-4.1.27/drivers/video/fbdev/core/
H A Dfbmon.c1400 int fb_videomode_from_videomode(const struct videomode *vm, fb_videomode_from_videomode() argument
1405 fbmode->xres = vm->hactive; fb_videomode_from_videomode()
1406 fbmode->left_margin = vm->hback_porch; fb_videomode_from_videomode()
1407 fbmode->right_margin = vm->hfront_porch; fb_videomode_from_videomode()
1408 fbmode->hsync_len = vm->hsync_len; fb_videomode_from_videomode()
1410 fbmode->yres = vm->vactive; fb_videomode_from_videomode()
1411 fbmode->upper_margin = vm->vback_porch; fb_videomode_from_videomode()
1412 fbmode->lower_margin = vm->vfront_porch; fb_videomode_from_videomode()
1413 fbmode->vsync_len = vm->vsync_len; fb_videomode_from_videomode()
1416 fbmode->pixclock = vm->pixelclock ? fb_videomode_from_videomode()
1417 KHZ2PICOS(vm->pixelclock / 1000) : 0; fb_videomode_from_videomode()
1421 if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH) fb_videomode_from_videomode()
1423 if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH) fb_videomode_from_videomode()
1425 if (vm->flags & DISPLAY_FLAGS_INTERLACED) fb_videomode_from_videomode()
1427 if (vm->flags & DISPLAY_FLAGS_DOUBLESCAN) fb_videomode_from_videomode()
1431 htotal = vm->hactive + vm->hfront_porch + vm->hback_porch + fb_videomode_from_videomode()
1432 vm->hsync_len; fb_videomode_from_videomode()
1433 vtotal = vm->vactive + vm->vfront_porch + vm->vback_porch + fb_videomode_from_videomode()
1434 vm->vsync_len; fb_videomode_from_videomode()
1437 fbmode->refresh = vm->pixelclock / (htotal * vtotal); fb_videomode_from_videomode()
1471 struct videomode vm; of_get_fb_videomode() local
1474 ret = of_get_videomode(np, &vm, index); of_get_fb_videomode()
1478 fb_videomode_from_videomode(&vm, fb); of_get_fb_videomode()
1481 of_node_full_name(np), vm.hactive, vm.vactive, np->name); of_get_fb_videomode()
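fb_videomode_from_videomode() converts the clock the opposite way from DRM: fbdev's pixclock is a period in picoseconds, so KHZ2PICOS() inverts a kHz rate, and the refresh rate falls out as pixelclock / (htotal * vtotal). A worked sketch with KHZ2PICOS reimplemented locally, on the assumption that it is the usual 10^9 / kHz reciprocal from linux/fb.h:

#include <stdio.h>

/* Reciprocal kHz <-> picoseconds conversion; assumed to match linux/fb.h. */
#define KHZ2PICOS(khz) (1000000000UL / (khz))

int main(void)
{
    unsigned long pixelclock = 148500000;  /* Hz, 1080p60 */
    unsigned int htotal = 2200, vtotal = 1125;

    unsigned long pixclock = KHZ2PICOS(pixelclock / 1000);
    unsigned int refresh = pixelclock / (htotal * vtotal);

    printf("pixclock=%lu ps refresh=%u Hz\n", pixclock, refresh);
    /* prints pixclock=6734 ps refresh=60 Hz */
    return 0;
}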
/linux-4.1.27/arch/unicore32/mm/
H A Dioremap.c238 struct vm_struct *vm; __uc32_iounmap() local
247 vm = find_vm_area(addr); __uc32_iounmap()
248 if (vm && (vm->flags & VM_IOREMAP) && __uc32_iounmap()
249 (vm->flags & VM_UNICORE_SECTION_MAPPING)) __uc32_iounmap()
250 unmap_area_sections((unsigned long)vm->addr, vm->size); __uc32_iounmap()
/linux-4.1.27/drivers/xen/
H A Dsys-hypervisor.c120 char *vm, *val; uuid_show_fallback() local
127 vm = xenbus_read(XBT_NIL, "vm", "", NULL); uuid_show_fallback()
128 if (IS_ERR(vm)) uuid_show_fallback()
129 return PTR_ERR(vm); uuid_show_fallback()
130 val = xenbus_read(XBT_NIL, vm, "uuid", NULL); uuid_show_fallback()
131 kfree(vm); uuid_show_fallback()
/linux-4.1.27/drivers/gpu/drm/nouveau/include/nvkm/core/
H A Dclient.h11 struct nvkm_vm *vm; member in struct:nvkm_client
/linux-4.1.27/arch/alpha/include/uapi/asm/
H A Dsetup.h33 * the zero page is zeroed out as soon as the vm system is
/linux-4.1.27/tools/testing/fault-injection/
H A Dfailcmd.sh34 set /proc/sys/vm/oom_kill_allocating_task to specified value
114 oom_kill_allocating_task_saved=`cat /proc/sys/vm/oom_kill_allocating_task`
120 > /proc/sys/vm/oom_kill_allocating_task
211 echo $oom_kill_allocating_task > /proc/sys/vm/oom_kill_allocating_task
/linux-4.1.27/include/linux/
H A Dkasan.h55 void kasan_free_shadow(const struct vm_struct *vm);
84 static inline void kasan_free_shadow(const struct vm_struct *vm) {} argument
H A Dvmalloc.h49 struct vm_struct *vm; member in struct:vmap_area
159 extern __init void vm_area_add_early(struct vm_struct *vm);
160 extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
H A Dseqno-fence.h84 * device's vm can be expensive.
H A Dvmstat.h31 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
/linux-4.1.27/mm/
H A Dvmalloc.c1147 * @vm: vm_struct to add
1149 * This function is used to add fixed kernel vm area to vmlist before
1150 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
1155 void __init vm_area_add_early(struct vm_struct *vm) vm_area_add_early() argument
1161 if (tmp->addr >= vm->addr) { vm_area_add_early()
1162 BUG_ON(tmp->addr < vm->addr + vm->size); vm_area_add_early()
1165 BUG_ON(tmp->addr + tmp->size > vm->addr); vm_area_add_early()
1167 vm->next = *p; vm_area_add_early()
1168 *p = vm; vm_area_add_early()
1173 * @vm: vm_struct to register
1176 * This function is used to register kernel vm area before
1177 * vmalloc_init() is called. @vm->size and @vm->flags should contain
1179 * vm->addr contains the allocated address.
1183 void __init vm_area_register_early(struct vm_struct *vm, size_t align) vm_area_register_early() argument
1189 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START; vm_area_register_early()
1191 vm->addr = (void *)addr; vm_area_register_early()
1193 vm_area_add_early(vm); vm_area_register_early()
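Both early helpers expect their vm_struct filled in before vmalloc_init() runs: vm_area_add_early() takes a caller-chosen addr and keeps vmlist sorted, while vm_area_register_early() picks the address itself and then defers to the former (the pcpu_page_first_chunk() and xen_vmalloc_p2m_tree() hits further down show it in use). A userspace re-rendering of the quoted insertion logic, with assert() standing in for BUG_ON():

#include <assert.h>
#include <stddef.h>

/*
 * Minimal stand-in for struct vm_struct: before vmalloc_init() the
 * kernel keeps early areas on vmlist, a singly linked list sorted by
 * address.
 */
struct vm_struct {
    struct vm_struct *next;
    char *addr;
    size_t size;
};

static struct vm_struct *vmlist;

/*
 * Same insertion logic as the vm_area_add_early() hits above: walk to
 * the first entry at or above the new area, check for overlap on both
 * sides, then link in.
 */
static void add_early(struct vm_struct *vm)
{
    struct vm_struct *tmp, **p;

    for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
        if (tmp->addr >= vm->addr) {
            assert(tmp->addr >= vm->addr + vm->size);
            break;
        }
        assert(tmp->addr + tmp->size <= vm->addr);
    }
    vm->next = *p;
    *p = vm;
}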
1220 va->vm = tmp;
1304 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, setup_vmalloc_vm() argument
1308 vm->flags = flags; setup_vmalloc_vm()
1309 vm->addr = (void *)va->va_start; setup_vmalloc_vm()
1310 vm->size = va->va_end - va->va_start; setup_vmalloc_vm()
1311 vm->caller = caller; setup_vmalloc_vm()
1312 va->vm = vm; setup_vmalloc_vm()
1317 static void clear_vm_uninitialized_flag(struct vm_struct *vm) clear_vm_uninitialized_flag() argument
1321 * we should make sure that vm has proper values. clear_vm_uninitialized_flag()
1325 vm->flags &= ~VM_UNINITIALIZED; clear_vm_uninitialized_flag()
1415 return va->vm; find_vm_area()
1434 struct vm_struct *vm = va->vm; remove_vm_area() local
1437 va->vm = NULL; remove_vm_area()
1442 kasan_free_shadow(vm); remove_vm_area()
1444 vm->size -= PAGE_SIZE; remove_vm_area()
1446 return vm; remove_vm_area()
1464 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", __vunmap()
1640 * @start: vm area range start
1641 * @end: vm area range end
1644 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
1976 * @addr: vm address.
2002 struct vm_struct *vm; vread() local
2019 vm = va->vm; vread()
2020 vaddr = (char *) vm->addr; vread()
2021 if (addr >= vaddr + get_vm_area_size(vm)) vread()
2031 n = vaddr + get_vm_area_size(vm) - addr; vread()
2034 if (!(vm->flags & VM_IOREMAP)) vread()
2057 * @addr: vm address.
2083 struct vm_struct *vm; vwrite() local
2101 vm = va->vm; vwrite()
2102 vaddr = (char *) vm->addr; vwrite()
2103 if (addr >= vaddr + get_vm_area_size(vm)) vwrite()
2112 n = vaddr + get_vm_area_size(vm) - addr; vwrite()
2115 if (!(vm->flags & VM_IOREMAP)) { vwrite()
2368 * Percpu allocator wants to use congruent vm areas so that it can
2517 /* insert all vm's */ pcpu_get_vm_areas()
2627 v = va->vm; s_show()
H A Dzsmalloc.c274 struct vm_struct *vm; /* vm area for mapping object that span pages */ member in struct:mapping_area
1037 if (area->vm) __zs_cpu_up()
1039 area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL); __zs_cpu_up()
1040 if (!area->vm) __zs_cpu_up()
1047 if (area->vm) __zs_cpu_down()
1048 free_vm_area(area->vm); __zs_cpu_down()
1049 area->vm = NULL; __zs_cpu_down()
1055 BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages)); __zs_map_object()
1056 area->vm_addr = area->vm->addr; __zs_map_object()
H A Dpercpu.c140 /* group information, used for vm allocation */
829 #include "percpu-vm.c"
1310 * and, from the second one, the backing allocator (currently either vm or
1503 * for vm areas.
1520 * areas and the other for the dynamic area. They share the same vm
2076 static struct vm_struct vm; pcpu_page_first_chunk() local
2117 /* allocate vm area, map the pages and copy static data */ pcpu_page_first_chunk()
2118 vm.flags = VM_ALLOC; pcpu_page_first_chunk()
2119 vm.size = num_possible_cpus() * ai->unit_size; pcpu_page_first_chunk()
2120 vm_area_register_early(&vm, PAGE_SIZE); pcpu_page_first_chunk()
2124 (unsigned long)vm.addr + unit * ai->unit_size; pcpu_page_first_chunk()
2149 unit_pages, psize_str, vm.addr, ai->static_size, pcpu_page_first_chunk()
2152 rc = pcpu_setup_first_chunk(ai, vm.addr); pcpu_page_first_chunk()
H A Dcleancache.c6 * Documentation/vm/cleancache.txt for more information.
H A Dpercpu-vm.c2 * mm/percpu-vm.c - vmalloc area based chunk allocation
H A Dvmstat.c53 * Accumulate the vm event counters across all CPUs.
1232 seq_printf(m, "\n vm stats threshold: %d", for_each_online_cpu()
1423 * threads for vm statistics updates disabled because of
H A Dpage-writeback.c68 /* The following parameters are exported via /proc/sys/vm */
257 * - vm.dirty_background_ratio or vm.dirty_background_bytes
258 * - vm.dirty_ratio or vm.dirty_bytes
1655 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
H A Drmap.c989 * @vma: the vm area in which the mapping is added
1016 * @vma: the vm area in which the mapping is added
1066 * @vma: the vm area in which the mapping is added
H A Dcompaction.c1162 * /proc/sys/vm/compact_memory __compact_finished()
1229 * /proc/sys/vm/compact_memory __compaction_suitable()
1602 * When called via /proc/sys/vm/compact_memory __compact_pgdat()
1662 /* This is the entry point for compacting all nodes via /proc/sys/vm */ sysctl_compaction_handler()
H A Dfrontswap.c6 * Documentation/vm/frontswap.txt for more information.
H A Dmadvise.c42 * We can potentially split a vm area into separate
H A Dmmap.c143 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting
251 * Unlink a file-based vm structure from its interval tree, to hide
267 * Close a vm structure and free it, returning the next.
2636 "See Documentation/vm/remap_file_pages.txt.\n", SYSCALL_DEFINE5()
2872 /* Insert vm structure into process list sorted by address
2988 * Return true if the calling process may expand its vm space by the passed
3388 pr_info("vm.user_reserve_kbytes reset to %lu\n", reserve_mem_notifier()
3394 pr_info("vm.admin_reserve_kbytes reset to %lu\n", reserve_mem_notifier()
H A Dmremap.c353 /* We can't remap across vm area boundaries */ vma_to_resize()
H A Doom_kill.c346 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
576 pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
H A Dnommu.c1500 printk(KERN_WARNING "Allocation of vm region for %lu byte allocation" do_mmap_pgoff()
1909 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting
/linux-4.1.27/drivers/video/fbdev/omap2/displays-new/
H A Dpanel-dpi.c164 struct videomode vm; panel_dpi_probe_pdata() local
180 videomode_from_timing(pdata->display_timing, &vm); panel_dpi_probe_pdata()
181 videomode_to_omap_video_timings(&vm, &ddata->videomode); panel_dpi_probe_pdata()
209 struct videomode vm; panel_dpi_probe_of() local
233 videomode_from_timing(&timing, &vm); panel_dpi_probe_of()
234 videomode_to_omap_video_timings(&vm, &ddata->videomode); panel_dpi_probe_of()
/linux-4.1.27/drivers/s390/cio/
H A Ddevice_id.c173 int vm = 0; snsid_callback() local
180 vm = 1; snsid_callback()
186 senseid->dev_model, vm ? " (diag210)" : ""); snsid_callback()
/linux-4.1.27/drivers/gpu/drm/nouveau/
H A Dnouveau_ttm.c216 struct nvkm_vm *vm = NULL; nv04_gart_manager_init() local
217 nvkm_vm_ref(priv->vm, &vm, NULL); nv04_gart_manager_init()
218 man->priv = vm; nv04_gart_manager_init()
225 struct nvkm_vm *vm = man->priv; nv04_gart_manager_fini() local
226 nvkm_vm_ref(NULL, &vm, NULL); nv04_gart_manager_fini()
H A Dnv84_fence.c151 ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma); nv84_fence_context_new()
153 ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm, nv84_fence_context_new()
157 /* map display semaphore buffers into channel's vm */ nv84_fence_context_new()
160 ret = nouveau_bo_vma_add(bo, cli->vm, &fctx->dispc_vma[i]); nv84_fence_context_new()
H A Dnouveau_gem.c71 if (!cli->vm) nouveau_gem_object_open()
78 vma = nouveau_bo_vma_find(nvbo, cli->vm); nouveau_gem_object_open()
90 ret = nouveau_bo_vma_add(nvbo, cli->vm, vma); nouveau_gem_object_open()
154 if (!cli->vm) nouveau_gem_object_close()
161 vma = nouveau_bo_vma_find(nvbo, cli->vm); nouveau_gem_object_close()
237 if (cli->vm) { nouveau_gem_info()
238 vma = nouveau_bo_vma_find(nvbo, cli->vm); nouveau_gem_info()
H A Dnouveau_bo.c195 if (drm->client.vm) nouveau_bo_new()
196 lpg_shift = drm->client.vm->mmu->lpg_shift; nouveau_bo_new()
221 if (drm->client.vm) { nouveau_bo_new()
223 nvbo->page_shift = drm->client.vm->mmu->lpg_shift; nouveau_bo_new()
1044 ret = nvkm_vm_get(drm->client.vm, size, old_node->page_shift, nouveau_bo_move_prep()
1049 ret = nvkm_vm_get(drm->client.vm, size, new_node->page_shift, nouveau_bo_move_prep()
1245 nvbo->page_shift != vma->vm->mmu->lpg_shift)) { nouveau_bo_move_ntfy()
1619 nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm) nouveau_bo_vma_find() argument
1623 if (vma->vm == vm) nouveau_bo_vma_find()
1631 nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm, nouveau_bo_vma_add() argument
1637 ret = nvkm_vm_get(vm, size, nvbo->page_shift, nouveau_bo_vma_add()
1644 nvbo->page_shift != vma->vm->mmu->lpg_shift)) nouveau_bo_vma_add()
H A Dnouveau_chan.c129 ret = nouveau_bo_vma_add(chan->push.buffer, cli->vm, nouveau_channel_prep()
139 args.limit = cli->vm->mmu->limit - 1; nouveau_channel_prep()
298 args.limit = cli->vm->mmu->limit - 1; nouveau_channel_init()
316 args.limit = cli->vm->mmu->limit - 1; nouveau_channel_init()
H A Dnouveau_drm.c127 nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL); nouveau_cli_destroy()
440 0x1000, &drm->client.vm); nouveau_drm_load()
444 nvxx_client(&drm->client.base)->vm = drm->client.vm; nouveau_drm_load()
836 0x1000, &cli->vm); nouveau_drm_open()
842 nvxx_client(&cli->base)->vm = cli->vm; nouveau_drm_open()
H A Dnouveau_drm.h85 struct nvkm_vm *vm; /*XXX*/ member in struct:nouveau_cli
H A Dnouveau_dma.c91 vma = nouveau_bo_vma_find(bo, cli->vm); nv50_dma_push()
H A Dnouveau_abi16.c316 ret = nouveau_bo_vma_add(chan->ntfy, cli->vm, nouveau_abi16_ioctl_channel_alloc()
H A Dnouveau_fbcon.c382 ret = nouveau_bo_vma_add(nvbo, drm->client.vm, nouveau_fbcon_create()
/linux-4.1.27/drivers/media/radio/si4713/
H A Dsi4713.c1069 struct v4l2_modulator vm; si4713_setup() local
1078 vm.index = 0; si4713_setup()
1080 vm.txsubchans = V4L2_TUNER_SUB_STEREO; si4713_setup()
1082 vm.txsubchans = V4L2_TUNER_SUB_MONO; si4713_setup()
1084 vm.txsubchans |= V4L2_TUNER_SUB_RDS; si4713_setup()
1085 si4713_s_modulator(&sdev->sd, &vm); si4713_setup()
1269 static int si4713_g_modulator(struct v4l2_subdev *sd, struct v4l2_modulator *vm) si4713_g_modulator() argument
1277 if (vm->index > 0) si4713_g_modulator()
1280 strncpy(vm->name, "FM Modulator", 32); si4713_g_modulator()
1281 vm->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LOW | si4713_g_modulator()
1285 vm->rangelow = si4713_to_v4l2(FREQ_RANGE_LOW); si4713_g_modulator()
1286 vm->rangehigh = si4713_to_v4l2(FREQ_RANGE_HIGH); si4713_g_modulator()
1301 vm->txsubchans = V4L2_TUNER_SUB_STEREO; si4713_g_modulator()
1303 vm->txsubchans = V4L2_TUNER_SUB_MONO; si4713_g_modulator()
1307 vm->txsubchans |= V4L2_TUNER_SUB_RDS; si4713_g_modulator()
1309 vm->txsubchans &= ~V4L2_TUNER_SUB_RDS; si4713_g_modulator()
1315 static int si4713_s_modulator(struct v4l2_subdev *sd, const struct v4l2_modulator *vm) si4713_s_modulator() argument
1325 if (vm->index > 0) si4713_s_modulator()
1329 if (vm->txsubchans & V4L2_TUNER_SUB_STEREO) si4713_s_modulator()
1331 else if (vm->txsubchans & V4L2_TUNER_SUB_MONO) si4713_s_modulator()
1336 rds = !!(vm->txsubchans & V4L2_TUNER_SUB_RDS); si4713_s_modulator()
H A Dradio-platform-si4713.c95 struct v4l2_modulator *vm) radio_si4713_g_modulator()
98 g_modulator, vm); radio_si4713_g_modulator()
102 const struct v4l2_modulator *vm) radio_si4713_s_modulator()
105 s_modulator, vm); radio_si4713_s_modulator()
94 radio_si4713_g_modulator(struct file *file, void *p, struct v4l2_modulator *vm) radio_si4713_g_modulator() argument
101 radio_si4713_s_modulator(struct file *file, void *p, const struct v4l2_modulator *vm) radio_si4713_s_modulator() argument
H A Dradio-usb-si4713.c92 struct v4l2_modulator *vm) vidioc_g_modulator()
96 return v4l2_subdev_call(radio->v4l2_subdev, tuner, g_modulator, vm); vidioc_g_modulator()
100 const struct v4l2_modulator *vm) vidioc_s_modulator()
104 return v4l2_subdev_call(radio->v4l2_subdev, tuner, s_modulator, vm); vidioc_s_modulator()
91 vidioc_g_modulator(struct file *file, void *priv, struct v4l2_modulator *vm) vidioc_g_modulator() argument
99 vidioc_s_modulator(struct file *file, void *priv, const struct v4l2_modulator *vm) vidioc_s_modulator() argument
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/core/
H A Dengctx.c95 if (client->vm) nvkm_engctx_create_()
96 atomic_inc(&client->vm->engref[nv_engidx(engine)]); nvkm_engctx_create_()
115 if (client->vm) nvkm_engctx_destroy()
116 atomic_dec(&client->vm->engref[nv_engidx(engine)]); nvkm_engctx_destroy()
H A Dgpuobj.c251 nvkm_gpuobj_map_vm(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm, nvkm_gpuobj_map_vm() argument
259 ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma); nvkm_gpuobj_map_vm()
/linux-4.1.27/arch/cris/include/asm/
H A Dprocessor.h23 /* This decides where the kernel will search for a free chunk of vm
/linux-4.1.27/arch/hexagon/include/asm/
H A Dkgdb.h35 * vm regs = psp+elr+est+badva = 4
H A Dpage.h108 /* Default vm area behavior is non-executable. */
H A Dprocessor.h62 * Decides where the kernel will search for a free chunk of vm space during
/linux-4.1.27/arch/alpha/include/asm/
H A Dprocessor.h28 /* This decides where the kernel will search for a free chunk of vm
/linux-4.1.27/tools/testing/selftests/
H A DMakefile19 TARGETS += vm
/linux-4.1.27/block/partitions/
H A Dldm.c228 * @vm: In-memory vmdb structure in which to return parsed information
231 * the in-memory vmdb structure @vm with the obtained information.
235 * Return: 'true' @vm contains VMDB info
236 * 'false' @vm contents are undefined
238 static bool ldm_parse_vmdb (const u8 *data, struct vmdb *vm) ldm_parse_vmdb() argument
240 BUG_ON (!data || !vm); ldm_parse_vmdb()
247 vm->ver_major = get_unaligned_be16(data + 0x12); ldm_parse_vmdb()
248 vm->ver_minor = get_unaligned_be16(data + 0x14); ldm_parse_vmdb()
249 if ((vm->ver_major != 4) || (vm->ver_minor != 10)) { ldm_parse_vmdb()
251 "Aborting.", 4, 10, vm->ver_major, vm->ver_minor); ldm_parse_vmdb()
255 vm->vblk_size = get_unaligned_be32(data + 0x08); ldm_parse_vmdb()
256 if (vm->vblk_size == 0) { ldm_parse_vmdb()
261 vm->vblk_offset = get_unaligned_be32(data + 0x0C); ldm_parse_vmdb()
262 vm->last_vblk_seq = get_unaligned_be32(data + 0x04); ldm_parse_vmdb()
499 struct vmdb *vm; ldm_validate_vmdb() local
504 vm = &ldb->vm; ldm_validate_vmdb()
513 if (!ldm_parse_vmdb (data, vm)) ldm_validate_vmdb()
522 if (vm->vblk_offset != 512) ldm_validate_vmdb()
523 ldm_info ("VBLKs start at offset 0x%04x.", vm->vblk_offset); ldm_validate_vmdb()
529 if ((vm->vblk_size * vm->last_vblk_seq) > (toc->bitmap1_size << 9)) { ldm_validate_vmdb()
1403 if (!ldm_ldmdb_add (f->data, f->num*ldb->vm.vblk_size, ldb)) list_for_each()
1432 size = ldb->vm.vblk_size; ldm_get_vblks()
1434 skip = ldb->vm.vblk_offset >> 9; /* Bytes to sectors */ ldm_get_vblks()
1435 finish = (size * ldb->vm.last_vblk_seq) >> 9; ldm_get_vblks()
H A Dldm.h204 struct vmdb vm; member in struct:ldmdb
/linux-4.1.27/arch/x86/xen/
H A Dp2m.c393 static struct vm_struct vm; xen_vmalloc_p2m_tree() local
397 vm.flags = VM_ALLOC; xen_vmalloc_p2m_tree()
398 vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit), xen_vmalloc_p2m_tree()
400 vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE); xen_vmalloc_p2m_tree()
401 pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size); xen_vmalloc_p2m_tree()
403 xen_max_p2m_pfn = vm.size / sizeof(unsigned long); xen_vmalloc_p2m_tree()
405 xen_rebuild_p2m_list(vm.addr); xen_vmalloc_p2m_tree()
407 xen_p2m_addr = vm.addr; xen_vmalloc_p2m_tree()
/linux-4.1.27/arch/s390/kvm/
H A Dpriv.c488 memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0])); handle_stsi_3_2_2()
490 memset(&mem->vm[0], 0, sizeof(mem->vm[0])); handle_stsi_3_2_2()
491 mem->vm[0].cpus_total = cpus; handle_stsi_3_2_2()
492 mem->vm[0].cpus_configured = cpus; handle_stsi_3_2_2()
493 mem->vm[0].cpus_standby = 0; handle_stsi_3_2_2()
494 mem->vm[0].cpus_reserved = 0; handle_stsi_3_2_2()
495 mem->vm[0].caf = 1000; handle_stsi_3_2_2()
496 memcpy(mem->vm[0].name, "KVMguest", 8); handle_stsi_3_2_2()
497 ASCEBC(mem->vm[0].name, 8); handle_stsi_3_2_2()
498 memcpy(mem->vm[0].cpi, "KVM/Linux ", 16); handle_stsi_3_2_2()
499 ASCEBC(mem->vm[0].cpi, 16); handle_stsi_3_2_2()
H A Dtrace-s390.h34 TP_printk("create vm%s",
/linux-4.1.27/arch/tile/mm/
H A Dpgtable.c479 struct vm_area_struct *vm; update_priority_cached() local
480 for (vm = mm->mmap; vm; vm = vm->vm_next) { update_priority_cached()
481 if (hv_pte_get_cached_priority(vm->vm_page_prot)) update_priority_cached()
484 if (vm == NULL) update_priority_cached()
563 /* Use the vm area unlocked, assuming the caller iounmap()
/linux-4.1.27/arch/microblaze/include/asm/
H A Dprocessor.h56 * This decides where the kernel will search for a free chunk of vm
94 * This decides where the kernel will search for a free chunk of vm
/linux-4.1.27/drivers/gpu/drm/imx/
H A Dipuv3-crtc.c244 struct videomode vm; ipu_crtc_mode_fixup() local
247 drm_display_mode_to_videomode(adjusted_mode, &vm); ipu_crtc_mode_fixup()
249 ret = ipu_di_adjust_videomode(ipu_crtc->di, &vm); ipu_crtc_mode_fixup()
253 drm_display_mode_from_videomode(&vm, adjusted_mode); ipu_crtc_mode_fixup()
/linux-4.1.27/arch/x86/mm/
H A Dpgtable_32.c75 /* Add VMALLOC_OFFSET to the parsed value due to vm area guard hole*/ parse_vmalloc()
H A Dioremap.c312 /* Use the vm area unlocked, assuming the caller iounmap()
/linux-4.1.27/drivers/gpu/drm/nouveau/include/nvkm/subdev/
H A Dmmu.h21 struct nvkm_vm *vm; member in struct:nvkm_vma
/linux-4.1.27/arch/score/include/asm/
H A Dprocessor.h37 * This decides where the kernel will search for a free chunk of vm
/linux-4.1.27/include/drm/
H A Dexynos_drm.h28 struct videomode vm; member in struct:exynos_drm_panel_info
H A Ddrm_modes.h201 void drm_display_mode_from_videomode(const struct videomode *vm,
204 struct videomode *vm);
/linux-4.1.27/arch/x86/kernel/cpu/
H A Dperf_event.h414 #define EVENT_EXTRA_REG(e, ms, m, vm, i) { \
418 .valid_mask = (vm), \
423 #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
424 EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
426 #define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
428 ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)
/linux-4.1.27/drivers/gpu/drm/tilcdc/
H A Dtilcdc_panel.c181 struct videomode vm; panel_connector_get_modes() local
183 if (videomode_from_timings(timings, &vm, i)) panel_connector_get_modes()
186 drm_display_mode_from_videomode(&vm, mode); panel_connector_get_modes()
/linux-4.1.27/mm/kasan/
H A Dkasan.c440 void kasan_free_shadow(const struct vm_struct *vm) kasan_free_shadow() argument
442 if (vm->flags & VM_KASAN) kasan_free_shadow()
443 vfree(kasan_mem_to_shadow(vm->addr)); kasan_free_shadow()
/linux-4.1.27/drivers/input/touchscreen/
H A Dintel-mid-touch.c138 static int mrstouch_ts_chan_read(u16 offset, u16 chan, u16 *vp, u16 *vm) mrstouch_ts_chan_read() argument
161 *vm = (res & 0xFF) << 3; /* Highest 7 bits */ mrstouch_ts_chan_read()
162 *vm |= (res >> 8) & 0x07; /* Lower 3 bits */ mrstouch_ts_chan_read()
163 *vm &= 0x3FF; mrstouch_ts_chan_read()
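mrstouch_ts_chan_read() reassembles a 10-bit ADC sample that the hardware splits across two register bytes: bits 7..0 of the raw word carry the upper seven bits of the result and bits 10..8 supply the lower three. A standalone sketch of the same bit splice:

#include <stdint.h>
#include <stdio.h>

/*
 * Reassemble the 10-bit sample as in mrstouch_ts_chan_read(): bits 7..0
 * of the raw word hold the upper seven bits of the result (shifted left
 * by 3) and bits 10..8 supply the lower three.
 */
static uint16_t splice_sample(uint16_t res)
{
    uint16_t vm;

    vm = (uint16_t)((res & 0xFF) << 3);  /* highest 7 bits */
    vm |= (res >> 8) & 0x07;             /* lower 3 bits */
    return vm & 0x3FF;                   /* clamp to 10 bits */
}

int main(void)
{
    printf("0x%03x\n", splice_sample(0x07FF)); /* prints 0x3ff */
    return 0;
}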
/linux-4.1.27/arch/s390/include/asm/
H A Dsysinfo.h124 } vm[8]; member in struct:sysinfo_3_2_2
/linux-4.1.27/arch/openrisc/include/asm/
H A Dprocessor.h46 /* This decides where the kernel will search for a free chunk of vm
/linux-4.1.27/arch/avr32/include/asm/
H A Dprocessor.h89 /* This decides where the kernel will search for a free chunk of vm
/linux-4.1.27/arch/c6x/include/asm/
H A Dprocessor.h49 * This decides where the kernel will search for a free chunk of vm
/linux-4.1.27/arch/m32r/include/asm/
H A Dprocessor.h68 /* This decides where the kernel will search for a free chunk of vm
/linux-4.1.27/arch/ia64/include/asm/sn/
H A Dtioca_provider.h72 u64 *ca_pcigart; /* gfx GART vm address */
81 u64 *ca_gfxgart; /* gfx GART vm address */
/linux-4.1.27/arch/s390/mm/
H A Dpgtable.c481 * @vmaddr: vm address associated with the host page table
500 * @vmaddr: vm address
503 * if the vm address is already mapped to a different guest segment.
581 * if the vm address is already mapped to a different guest segment.
631 /* Find the vm address for the guest address */ __gmap_zap()
667 /* Find the vm address for the guest address */ gmap_discard()
944 .procname = "vm",
H A Dcmm.c353 .procname = "vm",
/linux-4.1.27/drivers/gpu/drm/panel/
H A Dpanel-ld9040.c98 struct videomode vm; member in struct:ld9040
275 drm_display_mode_from_videomode(&ctx->vm, mode); ld9040_get_modes()
301 ret = of_get_videomode(np, &ctx->vm, 0); ld9040_parse_dt()
H A Dpanel-simple.c104 struct videomode vm; panel_simple_get_fixed_modes() local
106 videomode_from_timing(dt, &vm); panel_simple_get_fixed_modes()
114 drm_display_mode_from_videomode(&vm, mode); panel_simple_get_fixed_modes()
H A Dpanel-s6e8aa0.c105 struct videomode vm; member in struct:s6e8aa0
934 drm_display_mode_from_videomode(&ctx->vm, mode); s6e8aa0_get_modes()
960 ret = of_get_videomode(np, &ctx->vm, 0); s6e8aa0_parse_dt()
/linux-4.1.27/arch/um/os-Linux/
H A Dmem.c42 * not subject to the host's vm.dirty_ratio. If a tempdir is specified in the
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/
H A Dnv04.c64 struct nvkm_gpuobj *pgt = mmu->vm->pgt[0].obj[0]; nv04_dmaobj_bind()
/linux-4.1.27/arch/s390/include/uapi/asm/
H A Dkvm.h58 /* kvm attr_group on vm fd */
/linux-4.1.27/arch/sh/include/asm/
H A Dprocessor_32.h39 /* This decides where the kernel will search for a free chunk of vm
H A Dprocessor_64.h47 /* This decides where the kernel will search for a free chunk of vm
/linux-4.1.27/arch/mn10300/include/asm/
H A Dprocessor.h86 /* This decides where the kernel will search for a free chunk of vm
/linux-4.1.27/arch/m68k/include/asm/
H A Dprocessor.h67 /* This decides where the kernel will search for a free chunk of vm
/linux-4.1.27/arch/metag/include/asm/
H A Dprocessor.h28 /* This decides where the kernel will search for a free chunk of vm
/linux-4.1.27/arch/arc/include/asm/
H A Dprocessor.h130 /* This decides where the kernel will search for a free chunk of vm
/linux-4.1.27/virt/kvm/
H A Dirqchip.c123 /* Called only during vm destruction. Nobody can use the pointer kvm_free_irq_routing()
/linux-4.1.27/drivers/media/usb/pvrusb2/
H A Dpvrusb2-v4l2.c588 static int pvr2_querymenu(struct file *file, void *priv, struct v4l2_querymenu *vm) pvr2_querymenu() argument
595 ret = pvr2_ctrl_get_valname(pvr2_hdw_get_ctrl_v4l(hdw, vm->id), pvr2_querymenu()
596 vm->index, pvr2_querymenu()
597 vm->name, sizeof(vm->name) - 1, pvr2_querymenu()
599 vm->name[cnt] = 0; pvr2_querymenu()
/linux-4.1.27/include/uapi/linux/
H A Dkvm.h192 /* Encounter unexpected vm-exit due to delivery event. */
693 #define KVM_CAP_NR_VCPUS 9 /* returns recommended max vcpus per vm */
694 #define KVM_CAP_NR_MEMSLOTS 10 /* returns max memory slots per vm */
767 #define KVM_CAP_MAX_VCPUS 66 /* returns max vcpus per vm */
1111 /* ioctl for vm fd */
1173 * vm version available with KVM_CAP_ENABLE_CAP_VM
H A Dsysctl.h164 VM_UNUSED1=1, /* was: struct: Set vm swapping control */
186 VM_LAPTOP_MODE=23, /* vm laptop mode */
/linux-4.1.27/drivers/video/fbdev/mmp/hw/
H A Dmmp_ctrl.h633 #define CFG_GRA_VM_ENA(vm) ((vm)<<15)
635 #define CFG_DMA_VM_ENA(vm) ((vm)<<13)
637 #define CFG_CMD_VM_ENA(vm) ((vm)<<12)
/linux-4.1.27/drivers/misc/cxl/
H A Dfile.c238 static int afu_mmap(struct file *file, struct vm_area_struct *vm) afu_mmap() argument
246 return cxl_context_iomap(ctx, vm); afu_mmap()
/linux-4.1.27/arch/powerpc/include/asm/
H A Dprocessor.h104 /* This decides where the kernel will search for a free chunk of vm
124 /* This decides where the kernel will search for a free chunk of vm
H A Dpte-common.h128 * Note due to the way vm flags are laid out, the bits are XWR
/linux-4.1.27/arch/x86/include/asm/
H A Dtlbflush.h161 * vm statistics themselves.
H A Dprocessor.h883 /* This decides where the kernel will search for a free chunk of vm
916 * This decides where the kernel will search for a free chunk of vm
/linux-4.1.27/arch/xtensa/include/asm/
H A Dprocessor.h125 /* This decides where the kernel will search for a free chunk of vm
/linux-4.1.27/arch/sh/kernel/
H A Dsmp.c150 * from the vm mask set of all processes. __cpu_disable()
/linux-4.1.27/arch/um/kernel/
H A Dphysmem.c51 "/proc/sys/vm/max_map_count to <physical " map_memory()
/linux-4.1.27/arch/powerpc/kvm/
H A Dtiming.c224 snprintf(dbg_fname, sizeof(dbg_fname), "vm%u_vcpu%u_timing", kvmppc_create_vcpu_debugfs()
/linux-4.1.27/arch/ia64/mm/
H A Dfault.c134 * May find no vma, but could be that the last vm area is the ia64_do_page_fault()
/linux-4.1.27/arch/arm/mach-sa1100/
H A Dgeneric.c338 * Below 0xe8000000 is reserved for vm allocation.
/linux-4.1.27/arch/alpha/mm/
H A Dinit.c200 /* register the vm area */ callback_init()
/linux-4.1.27/drivers/video/fbdev/matrox/
H A Dmatroxfb_base.c1291 vaddr_t vm; matroxfb_getmemory() local
1300 vm = minfo->video.vbase; matroxfb_getmemory()
1312 *tmp++ = mga_readb(vm, offs); matroxfb_getmemory()
1314 mga_writeb(vm, offs, 0x02); matroxfb_getmemory()
1317 if (mga_readb(vm, offs) != 0x02) matroxfb_getmemory()
1319 mga_writeb(vm, offs, mga_readb(vm, offs) - 0x02); matroxfb_getmemory()
1320 if (mga_readb(vm, offs)) matroxfb_getmemory()
1325 mga_writeb(vm, offs2, *tmp++); matroxfb_getmemory()
/linux-4.1.27/arch/xtensa/kernel/
H A Dsmp.c273 * from the vm mask set of all processes. __cpu_disable()
/linux-4.1.27/arch/tile/include/asm/
H A Dprocessor.h184 * This decides where the kernel will search for a free chunk of vm
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/engine/gr/
H A Dgf100.c279 struct nvkm_vm *vm = nvkm_client(parent)->vm; gf100_gr_context_ctor() local
303 ret = nvkm_gpuobj_map_vm(nv_gpuobj(chan->mmio), vm, gf100_gr_context_ctor()
316 ret = nvkm_gpuobj_map_vm(chan->data[i].mem, vm, data->access, gf100_gr_context_ctor()
H A Dnv50.c291 nv_error(priv, "vm flush timeout\n"); g84_gr_tlb_flush()
/linux-4.1.27/arch/mips/include/asm/
H A Dprocessor.h86 * This decides where the kernel will search for a free chunk of vm
/linux-4.1.27/arch/arm/include/asm/
H A Dmemory.h75 * The XIP kernel gets mapped at the bottom of the module vm area.
/linux-4.1.27/security/keys/
H A Dkeyctl.c70 bool vm; SYSCALL_DEFINE5() local
101 vm = false; SYSCALL_DEFINE5()
108 vm = true; SYSCALL_DEFINE5()
141 if (!vm) SYSCALL_DEFINE5()
/linux-4.1.27/fs/proc/
H A Dtask_mmu.c750 * Documentation/vm/soft-dirty.txt for full description clear_soft_dirty()
893 " See the linux/Documentation/vm/pagemap.txt for " clear_refs_write()
1333 "linux/Documentation/vm/pagemap.txt for details.\n"); pagemap_open()
/linux-4.1.27/drivers/misc/vmw_vmci/
H A Dvmci_datagram.c160 * Dispatch datagram as a host, to the host, or other vm context. This
/linux-4.1.27/drivers/scsi/bfa/
H A Dbfa_defs_fcs.h95 struct bfa_lport_symname_s sym_name; /* vm port symbolic name */
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
H A Dvvp_page.c379 (*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ", vvp_page_print()
/linux-4.1.27/arch/metag/kernel/
H A Dsmp.c285 * from the vm mask set of all processes. __cpu_disable()
/linux-4.1.27/arch/mips/pci/
H A Dpci-alchemy.c461 dev_err(&pdev->dev, "unable to get vm area\n"); alchemy_pci_probe()
/linux-4.1.27/arch/arm64/kernel/
H A Dsmp.c236 * Remove this CPU from the vm mask set of all processes. __cpu_disable()
/linux-4.1.27/drivers/media/v4l2-core/
H A Dv4l2-dev.c388 static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm) v4l2_mmap() argument
396 ret = vdev->fops->mmap(filp, vm); v4l2_mmap()
/linux-4.1.27/include/media/
H A Dv4l2-subdev.h200 int (*g_modulator)(struct v4l2_subdev *sd, struct v4l2_modulator *vm);
201 int (*s_modulator)(struct v4l2_subdev *sd, const struct v4l2_modulator *vm);
/linux-4.1.27/ipc/
H A Dshm.c501 .open = shm_open, /* callback for a new vm-area open */
502 .close = shm_close, /* callback for when the vm-area is released */
/linux-4.1.27/fs/
H A Dsplice.c39 * a vm helper function, it's already simplified quite a bit by the
1622 * or nasty vm tricks. We simply map in the user memory and fill them into
1628 * - Lots of nasty vm tricks, that are neither fast nor flexible (it
/linux-4.1.27/arch/tile/kernel/
H A Dprocess.c171 /* Save user stack top pointer so we can ID the stack vm area later. */ copy_thread()
/linux-4.1.27/include/net/iucv/
H A Diucv.h19 * www.vm.ibm.com/pubs, manual # SC24-6084
/linux-4.1.27/arch/ia64/include/asm/
H A Dprocessor.h39 * This decides where the kernel will search for a free chunk of vm
