Lines Matching refs:vma
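(All of these hits appear to come from drivers/xen/privcmd.c, the Xen privileged-command driver, in a pre-4.8 Linux kernel where the remap helpers were still named after MFNs rather than GFNs. The short C sketches interleaved below reconstruct the functions around each group of matches; they follow that vintage of the file but are paraphrased from memory, so treat them as illustrative context rather than authoritative source.)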

47                struct vm_area_struct *vma,
198 struct vm_area_struct *vma; member
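The `member` hit at line 198 is the vma field of the state threaded through the IOCTL_PRIVCMD_MMAP walk. A minimal reconstruction of that struct, assuming the mfn-era layout:

struct mmap_mfn_state {
        unsigned long va;               /* next virtual address expected */
        struct vm_area_struct *vma;     /* the VMA being populated */
        domid_t domain;                 /* foreign domain owning the frames */
};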
206 struct vm_area_struct *vma = st->vma; in mmap_mfn_range() local
216 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end)) in mmap_mfn_range()
219 rc = xen_remap_domain_mfn_range(vma, in mmap_mfn_range()
222 vma->vm_page_prot, in mmap_mfn_range()
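Lines 206-222 all sit inside mmap_mfn_range(), the per-chunk callback invoked for each privcmd_mmap_entry. A sketch, assuming the seven-argument mfn-era xen_remap_domain_mfn_range():

static int mmap_mfn_range(void *data, void *state)
{
        struct privcmd_mmap_entry *msg = data;
        struct mmap_mfn_state *st = state;
        struct vm_area_struct *vma = st->vma;
        int rc;

        /* Chunks must be contiguous in va space and stay inside the VMA. */
        if ((msg->va != st->va) ||
            ((msg->va + (msg->npages << PAGE_SHIFT)) > vma->vm_end))
                return -EINVAL;

        rc = xen_remap_domain_mfn_range(vma, msg->va & PAGE_MASK,
                                        msg->mfn, msg->npages,
                                        vma->vm_page_prot,
                                        st->domain, NULL);
        if (rc < 0)
                return rc;

        st->va += msg->npages << PAGE_SHIFT;
        return 0;
}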
236 struct vm_area_struct *vma; in privcmd_ioctl_mmap() local
262 vma = find_vma(mm, msg->va); in privcmd_ioctl_mmap()
265 if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data) in privcmd_ioctl_mmap()
267 vma->vm_private_data = PRIV_VMA_LOCKED; in privcmd_ioctl_mmap()
270 state.va = vma->vm_start; in privcmd_ioctl_mmap()
271 state.vma = vma; in privcmd_ioctl_mmap()
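Lines 236-271 are privcmd_ioctl_mmap() locating and claiming the caller's VMA under mmap_sem before walking the entry list. Roughly as follows (gather_array(), traverse_pages() and free_page_list() are the file's own helpers):

static long privcmd_ioctl_mmap(void __user *udata)
{
        struct privcmd_mmap mmapcmd;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int rc;
        LIST_HEAD(pagelist);
        struct mmap_mfn_state state;

        /* Auto-translated guests must use the batch interface instead. */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -ENOSYS;

        if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
                return -EFAULT;

        /* Pull the privcmd_mmap_entry array into kernel pages. */
        rc = gather_array(&pagelist, mmapcmd.num,
                          sizeof(struct privcmd_mmap_entry), mmapcmd.entry);
        if (rc || list_empty(&pagelist))
                goto out;

        down_write(&mm->mmap_sem);
        {
                struct page *page = list_first_entry(&pagelist,
                                                     struct page, lru);
                struct privcmd_mmap_entry *msg = page_address(page);

                vma = find_vma(mm, msg->va);
                rc = -EINVAL;
                /* Must start exactly at an unclaimed VMA. */
                if (!vma || (msg->va != vma->vm_start) ||
                    vma->vm_private_data)
                        goto out_up;
                vma->vm_private_data = PRIV_VMA_LOCKED;
        }

        state.va = vma->vm_start;
        state.vma = vma;
        state.domain = mmapcmd.dom;

        rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                            &pagelist, mmap_mfn_range, &state);

out_up:
        up_write(&mm->mmap_sem);
out:
        free_page_list(&pagelist);
        return rc;
}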
291 struct vm_area_struct *vma; member
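The second `member` hit, at line 291, is the vma field of the batch-mapping state. A trimmed reconstruction (field names such as user_mfn follow the mfn-era V1/V2 ABI and may differ between versions):

struct mmap_batch_state {
        domid_t domain;
        unsigned long va;
        struct vm_area_struct *vma;
        int index;
        /* 0 = no errors, 1 = some error, -ENOENT = a frame was absent. */
        int global_error;
        int version;
        /* User-space arrays that receive per-frame errors afterwards. */
        xen_pfn_t __user *user_mfn;     /* V1 ABI */
        int __user *user_err;           /* V2 ABI */
};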
315 struct vm_area_struct *vma = st->vma; in mmap_batch_fn() local
316 struct page **pages = vma->vm_private_data; in mmap_batch_fn()
324 ret = xen_remap_domain_mfn_array(st->vma, st->va & PAGE_MASK, mfnp, nr, in mmap_batch_fn()
325 (int *)mfnp, st->vma->vm_page_prot, in mmap_batch_fn()
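Lines 315-325 are inside mmap_batch_fn(), which remaps one batch of frames and reuses the mfn array itself to report per-frame errors. A sketch of the surrounding logic:

static int mmap_batch_fn(void *data, int nr, void *state)
{
        xen_pfn_t *mfnp = data;
        struct mmap_batch_state *st = state;
        struct vm_area_struct *vma = st->vma;
        struct page **pages = vma->vm_private_data;
        struct page **cur_pages = NULL;
        int ret;

        /* Auto-translated guests back the mapping with ballooned pages. */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                cur_pages = &pages[st->index];

        ret = xen_remap_domain_mfn_array(st->vma, st->va & PAGE_MASK,
                                         mfnp, nr, (int *)mfnp,
                                         st->vma->vm_page_prot,
                                         st->domain, cur_pages);

        /* Fold partial failures into the tristate global_error. */
        if (ret != nr) {
                if (ret == -ENOENT)
                        st->global_error = -ENOENT;
                else if (st->global_error == 0)
                        st->global_error = 1;
        }

        st->va += (unsigned long)nr << PAGE_SHIFT;
        st->index += nr;
        return 0;
}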
395 static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs) in alloc_empty_pages() argument
411 BUG_ON(vma->vm_private_data != NULL); in alloc_empty_pages()
412 vma->vm_private_data = pages; in alloc_empty_pages()
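Lines 395-412 are alloc_empty_pages(), which, for auto-translated guests, backs the VMA with ballooned pages and parks the page array in vm_private_data. A sketch (the alloc_xenballooned_pages() signature has changed across kernel versions; the two-argument form is assumed here):

static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
        int rc;
        struct page **pages;

        pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
        if (pages == NULL)
                return -ENOMEM;

        rc = alloc_xenballooned_pages(numpgs, pages);
        if (rc != 0) {
                pr_warn("%s: could not alloc %d pfns rc:%d\n",
                        __func__, numpgs, rc);
                kfree(pages);
                return -ENOMEM;
        }

        BUG_ON(vma->vm_private_data != NULL);
        vma->vm_private_data = pages;
        return 0;
}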
424 struct vm_area_struct *vma; in privcmd_ioctl_mmap_batch() local
472 vma = find_vma(mm, m.addr); in privcmd_ioctl_mmap_batch()
473 if (!vma || in privcmd_ioctl_mmap_batch()
474 vma->vm_ops != &privcmd_vm_ops) { in privcmd_ioctl_mmap_batch()
490 if (vma->vm_private_data == NULL) { in privcmd_ioctl_mmap_batch()
491 if (m.addr != vma->vm_start || in privcmd_ioctl_mmap_batch()
492 m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) { in privcmd_ioctl_mmap_batch()
497 ret = alloc_empty_pages(vma, m.num); in privcmd_ioctl_mmap_batch()
501 vma->vm_private_data = PRIV_VMA_LOCKED; in privcmd_ioctl_mmap_batch()
503 if (m.addr < vma->vm_start || in privcmd_ioctl_mmap_batch()
504 m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) { in privcmd_ioctl_mmap_batch()
508 if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) { in privcmd_ioctl_mmap_batch()
515 state.vma = vma; in privcmd_ioctl_mmap_batch()
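Lines 424-515 belong to privcmd_ioctl_mmap_batch(). The VMA handling distinguishes a fresh VMA, which must span the request exactly (and, for auto-translated guests, gets balloon pages allocated up front), from a previously prepared one, which may be partially re-populated as long as the target range holds no live PTEs. The middle of the function, approximately:

        vma = find_vma(mm, m.addr);
        if (!vma || vma->vm_ops != &privcmd_vm_ops) {
                ret = -EINVAL;
                goto out_unlock;
        }

        if (vma->vm_private_data == NULL) {
                /* Fresh VMA: the request must cover it exactly. */
                if (m.addr != vma->vm_start ||
                    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
                        ret = -EINVAL;
                        goto out_unlock;
                }
                if (xen_feature(XENFEAT_auto_translated_physmap)) {
                        ret = alloc_empty_pages(vma, m.num);
                        if (ret < 0)
                                goto out_unlock;
                } else
                        vma->vm_private_data = PRIV_VMA_LOCKED;
        } else {
                /* Retry case: stay inside the VMA, only fill holes. */
                if (m.addr < vma->vm_start ||
                    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
                        ret = -EINVAL;
                        goto out_unlock;
                }
                if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
                        ret = -EINVAL;
                        goto out_unlock;
                }
        }

        state.domain = m.dom;
        state.vma = vma;
        state.va = m.addr;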
581 static void privcmd_close(struct vm_area_struct *vma) in privcmd_close() argument
583 struct page **pages = vma->vm_private_data; in privcmd_close()
584 int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; in privcmd_close()
590 rc = xen_unmap_domain_mfn_range(vma, numpgs, pages); in privcmd_close()
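Lines 583-590 are privcmd_close(), the vm_ops->close hook. For auto-translated guests it unmaps the foreign frames and returns the ballooned pages; if the unmap fails it deliberately leaks the pages rather than recycle frames the hypervisor may still reference. A sketch:

static void privcmd_close(struct vm_area_struct *vma)
{
        struct page **pages = vma->vm_private_data;
        int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        int rc;

        if (!xen_feature(XENFEAT_auto_translated_physmap) ||
            !numpgs || !pages)
                return;

        rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
        if (rc == 0)
                free_xenballooned_pages(numpgs, pages);
        else
                pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
                        numpgs, rc);
        kfree(pages);
}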
599 static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in privcmd_fault() argument
602 vma, vma->vm_start, vma->vm_end, in privcmd_fault()
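Lines 599-602 are privcmd_fault(). privcmd VMAs are populated only by the ioctls above, so any page fault that reaches here is a caller bug and yields SIGBUS. This vintage still passed the VMA to the fault handler (later kernels take only a struct vm_fault *):

static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
               vma, vma->vm_start, vma->vm_end,
               vmf->pgoff, vmf->virtual_address);

        return VM_FAULT_SIGBUS;
}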
613 static int privcmd_mmap(struct file *file, struct vm_area_struct *vma) in privcmd_mmap() argument
617 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY | in privcmd_mmap()
619 vma->vm_ops = &privcmd_vm_ops; in privcmd_mmap()
620 vma->vm_private_data = NULL; in privcmd_mmap()
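Lines 613-620 are privcmd_mmap(), which only tags the VMA; real PTEs arrive later through the ioctls. VM_DONTCOPY matters because copy_page_range() cannot recreate foreign mappings in a fork child:

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* DONTCOPY is essential for Xen because copy_page_range()
         * doesn't know how to recreate these mappings. */
        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
                         VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &privcmd_vm_ops;
        vma->vm_private_data = NULL;

        return 0;
}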
637 struct vm_area_struct *vma, in privcmd_vma_range_is_mapped() argument
641 return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT, in privcmd_vma_range_is_mapped()
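Lines 637-641 are privcmd_vma_range_is_mapped() (the lone hit at line 47 near the top of the file is most likely the vma parameter of its forward declaration). It walks the range's PTEs via apply_to_page_range() and reports whether any are already populated; reconstructed together with its helper callback:

static int is_mapped_fn(pte_t *pte, struct page *pmd_page,
                        unsigned long addr, void *data)
{
        /* Abort the walk with -EBUSY on the first live PTE. */
        return pte_none(*pte) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
                struct vm_area_struct *vma,
                unsigned long addr,
                unsigned long nr_pages)
{
        /* Nonzero (i.e. -EBUSY) from the walk means "already mapped". */
        return apply_to_page_range(vma->vm_mm, addr,
                                   nr_pages << PAGE_SHIFT,
                                   is_mapped_fn, NULL) != 0;
}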