pvma               62 drivers/gpu/drm/nouveau/nouveau_vmm.c nouveau_vma_del(struct nouveau_vma **pvma)
pvma               64 drivers/gpu/drm/nouveau/nouveau_vmm.c 	struct nouveau_vma *vma = *pvma;
pvma               71 drivers/gpu/drm/nouveau/nouveau_vmm.c 		kfree(*pvma);
pvma               72 drivers/gpu/drm/nouveau/nouveau_vmm.c 		*pvma = NULL;
pvma               78 drivers/gpu/drm/nouveau/nouveau_vmm.c 		struct nouveau_vma **pvma)
pvma               85 drivers/gpu/drm/nouveau/nouveau_vmm.c 	if ((vma = *pvma = nouveau_vma_find(nvbo, vmm))) {
pvma               90 drivers/gpu/drm/nouveau/nouveau_vmm.c 	if (!(vma = *pvma = kmalloc(sizeof(*vma), GFP_KERNEL)))
pvma              116 drivers/gpu/drm/nouveau/nouveau_vmm.c 		nouveau_vma_del(pvma);
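The nouveau_vmm.c hits above are the classic double-pointer out-parameter idiom: nouveau_vma_new() publishes its result through *pvma (either sharing an existing mapping found by nouveau_vma_find() or kmalloc()ing a fresh one), and nouveau_vma_del() kfree()s and NULLs the caller's pointer once the refcount drops to zero. A minimal caller sketch, assuming an nvbo/vmm pair is already in scope (the surrounding flow is illustrative, not from the tree):

        struct nouveau_vma *vma = NULL;
        int ret;

        ret = nouveau_vma_new(nvbo, vmm, &vma); /* sets *pvma on success */
        if (ret)
                return ret;
        /* ... use vma->addr while the mapping is live ... */
        nouveau_vma_del(&vma);  /* drops a ref; frees and NULLs vma at zero */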
pvma               35 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c 	      u32 argc, u64 *paddr, u64 *psize, struct nvkm_vma **pvma)
pvma               55 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c 				nvkm_memory_size(memory), pvma);
pvma               59 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c 	ret = nvkm_memory_map(memory, 0, bar, *pvma, &uvmm, sizeof(uvmm));
pvma               63 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c 	*paddr = device->func->resource_addr(device, 1) + (*pvma)->addr;
pvma               64 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c 	*psize = (*pvma)->size;
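gf100_mem_map() fills three out-parameters at once: *pvma receives the vma from nvkm_vmm_get(), while *paddr/*psize describe the CPU-visible window inside BAR1 (resource 1). A hedged consumer sketch; in the tree this hook is reached through a function table, so the direct call is purely illustrative:

        u64 paddr, psize;
        struct nvkm_vma *bar_vma;
        int ret;

        ret = gf100_mem_map(mmu, memory, argv, argc, &paddr, &psize, &bar_vma);
        if (ret)
                return ret;
        /* [paddr, paddr + psize) can now back a CPU mapping, e.g. the
         * range exposed to userspace for mmap(). */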
pvma               32 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c 	     u32 argc, u64 *paddr, u64 *psize, struct nvkm_vma **pvma)
pvma               46 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c 	*pvma = ERR_PTR(-ENODEV);
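nv04 has no VMM to allocate from, so nv04_mem_map() still reports success but stores an encoded error through *pvma instead of a real allocation. A caller can separate the two cases with the standard ERR_PTR()/IS_ERR() convention, roughly:

        struct nvkm_vma *bar_vma = *pvma;

        if (IS_ERR(bar_vma)) {
                /* nv04 path: no vma was allocated, nothing to put later */
        } else {
                /* gf100/nv50 path: release with nvkm_vmm_put() at teardown */
        }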
pvma               35 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c 	     u32 argc, u64 *paddr, u64 *psize, struct nvkm_vma **pvma)
pvma               56 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c 	ret = nvkm_vmm_get(bar, 12, size, pvma);
pvma               60 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c 	*paddr = device->func->resource_addr(device, 1) + (*pvma)->addr;
pvma               61 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c 	*psize = (*pvma)->size;
pvma               62 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c 	return nvkm_memory_map(memory, 0, bar, *pvma, &uvmm, sizeof(uvmm));
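nv50_mem_map() mirrors the gf100 flow but requests a fixed 12-bit (4 KiB) page from nvkm_vmm_get() and maps as its final statement, so nvkm_memory_map()'s return value doubles as the hook's own. Either way, the vma handed back through *pvma remains the caller's to release; a plausible teardown sketch (the exact call site and the bar vmm handle are assumptions):

        if (!IS_ERR(bar_vma))
                nvkm_vmm_put(bar, &bar_vma);    /* frees the range, NULLs bar_vma */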
pvma             1624 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma)
pvma             1626 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vma *vma = *pvma;
pvma             1631 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		*pvma = NULL;
pvma             1637 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		    u8 shift, u8 align, u64 size, struct nvkm_vma **pvma)
pvma             1769 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	*pvma = vma;
pvma             1774 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
pvma             1778 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma);
pvma              174 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h 			struct nvkm_vma **pvma);
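nvkm_vmm_get() and nvkm_vmm_put() are the allocation pair behind the mem*.c hooks above: get forwards to nvkm_vmm_get_locked(), which publishes the new range with *pvma = vma, and put tears the range down and NULLs the caller's pointer. A minimal sketch of balanced usage, assuming vmm and size are in scope:

        struct nvkm_vma *vma;
        int ret;

        ret = nvkm_vmm_get(vmm, 12, size, &vma);        /* 12 = 4 KiB page shift */
        if (ret)
                return ret;
        /* ... map into [vma->addr, vma->addr + vma->size) ... */
        nvkm_vmm_put(vmm, &vma);        /* vma is NULL afterwards */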
pvma             2588 mm/mempolicy.c 		struct vm_area_struct pvma;
pvma             2606 mm/mempolicy.c 		vma_init(&pvma, NULL);
pvma             2607 mm/mempolicy.c 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
pvma             2608 mm/mempolicy.c 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
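In mm/mempolicy.c, pvma is not a pointer but an on-stack pseudo-vma: mpol_set_shared_policy() indexes its shared-policy tree by vma range, so mpol_shared_policy_init() fabricates a vma spanning [0, TASK_SIZE) to make the mount's policy apply to the entire file. Simplified shape of that block (nodemask setup and error handling elided):

        struct vm_area_struct pvma;

        vma_init(&pvma, NULL);          /* zeroed, no mm: vm_start is 0 */
        pvma.vm_end = TASK_SIZE;        /* so the policy covers the whole file */
        mpol_set_shared_policy(sp, &pvma, new); /* tree takes its own ref */
        mpol_put(new);                  /* drop ours */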
pvma              588 mm/nommu.c     	struct vm_area_struct *pvma, *prev;
pvma              613 mm/nommu.c     		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);
pvma              617 mm/nommu.c     		if (vma->vm_start < pvma->vm_start)
pvma              619 mm/nommu.c     		else if (vma->vm_start > pvma->vm_start) {
pvma              622 mm/nommu.c     		} else if (vma->vm_end < pvma->vm_end)
pvma              624 mm/nommu.c     		else if (vma->vm_end > pvma->vm_end) {
pvma              627 mm/nommu.c     		} else if (vma < pvma)
pvma              629 mm/nommu.c     		else if (vma > pvma) {
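The mm/nommu.c hits are a plain rbtree descent in add_vma_to_mm(), with pvma as the node currently being compared: vmas are ordered by (vm_start, vm_end, struct address), the last key giving identical ranges a total order. Reconstructed loop shape (simplified; the in-tree version also tracks an rb_prev node so it can maintain the vma list):

        p = &mm->mm_rb.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                pvma = rb_entry(parent, struct vm_area_struct, vm_rb);
                if (vma->vm_start < pvma->vm_start)
                        p = &(*p)->rb_left;
                else if (vma->vm_start > pvma->vm_start)
                        p = &(*p)->rb_right;
                else if (vma->vm_end < pvma->vm_end)
                        p = &(*p)->rb_left;
                else if (vma->vm_end > pvma->vm_end)
                        p = &(*p)->rb_right;
                else if (vma < pvma)            /* tie-break on address */
                        p = &(*p)->rb_left;
                else if (vma > pvma)
                        p = &(*p)->rb_right;
                else
                        BUG();                  /* same vma inserted twice */
        }
        rb_link_node(&vma->vm_rb, parent, p);
        rb_insert_color(&vma->vm_rb, &mm->mm_rb);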
pvma              316 mm/rmap.c      int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
pvma              323 mm/rmap.c      	if (!pvma->anon_vma)
pvma              333 mm/rmap.c      	error = anon_vma_clone(vma, pvma);
pvma              353 mm/rmap.c      	anon_vma->root = pvma->anon_vma->root;
pvma              354 mm/rmap.c      	anon_vma->parent = pvma->anon_vma;
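In anon_vma_fork(), pvma is the parent's vma: the child clones the parent's anon_vma_chain via anon_vma_clone(), then allocates its own anon_vma whose root and parent fields link back into the parent's hierarchy, so rmap walks can still reach every CoW copy of a page. Fork-time usage, roughly as dup_mmap() invokes it (label name assumed):

        /* tmp is the child's copy of the parent vma mpnt */
        if (anon_vma_fork(tmp, mpnt))
                goto fail_nomem_anon_vma_fork;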
pvma             1453 mm/shmem.c     	struct vm_area_struct pvma;
pvma             1457 mm/shmem.c     	shmem_pseudo_vma_init(&pvma, info, index);
pvma             1458 mm/shmem.c     	vmf.vma = &pvma;
pvma             1461 mm/shmem.c     	shmem_pseudo_vma_destroy(&pvma);
pvma             1469 mm/shmem.c     	struct vm_area_struct pvma;
pvma             1482 mm/shmem.c     	shmem_pseudo_vma_init(&pvma, info, hindex);
pvma             1484 mm/shmem.c     			HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
pvma             1485 mm/shmem.c     	shmem_pseudo_vma_destroy(&pvma);
pvma             1494 mm/shmem.c     	struct vm_area_struct pvma;
pvma             1497 mm/shmem.c     	shmem_pseudo_vma_init(&pvma, info, index);
pvma             1498 mm/shmem.c     	page = alloc_page_vma(gfp, &pvma, 0);
pvma             1499 mm/shmem.c     	shmem_pseudo_vma_destroy(&pvma);
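mm/shmem.c applies the same pseudo-vma trick as mempolicy.c, wrapped in a helper pair: each allocation site builds a throwaway vma on the stack so swap-in and page allocation honour the inode's shared NUMA policy, then tears it down again. The helpers amount to roughly the following (simplified from the tree):

static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
                struct shmem_inode_info *info, pgoff_t index)
{
        /* Create a pseudo vma that just carries the inode's policy. */
        vma_init(vma, NULL);
        /* Bias interleave by inode number to spread files across nodes. */
        vma->vm_pgoff = index + info->vfs_inode.i_ino;
        vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
}

static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
{
        /* Drop the reference taken by mpol_shared_policy_lookup(). */
        mpol_cond_put(vma->vm_policy);
}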