Lines matching refs: vmf (all hits below are in fs/dax.c)
237 struct vm_fault *vmf) in dax_load_hole() argument
242 page = find_or_create_page(mapping, vmf->pgoff, in dax_load_hole()
248 if (vmf->pgoff >= size) { in dax_load_hole()
254 vmf->page = page; in dax_load_hole()
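The dax_load_hole() hits above are the read-fault-on-a-hole path: the fault is backed by a zeroed page cache page instead of device memory. A reconstruction consistent with these fragments, assuming a ~v4.0-era fs/dax.c (the exact body varies between kernel versions):

static int dax_load_hole(struct address_space *mapping, struct page *page,
                         struct vm_fault *vmf)
{
        unsigned long size;
        struct inode *inode = mapping->host;

        if (!page)
                page = find_or_create_page(mapping, vmf->pgoff,
                                           GFP_KERNEL | __GFP_ZERO);
        if (!page)
                return VM_FAULT_OOM;

        /* Recheck i_size under the page lock to close a truncate race. */
        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (vmf->pgoff >= size) {
                unlock_page(page);
                page_cache_release(page);  /* pre-4.6 spelling of put_page() */
                return VM_FAULT_SIGBUS;
        }

        vmf->page = page;
        return VM_FAULT_LOCKED;
}

Returning VM_FAULT_LOCKED hands the still-locked page back to the generic fault code, which installs and unlocks it.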
271 struct vm_area_struct *vma, struct vm_fault *vmf) in dax_insert_mapping() argument
275 unsigned long vaddr = (unsigned long)vmf->virtual_address; in dax_insert_mapping()
291 if (unlikely(vmf->pgoff >= size)) { in dax_insert_mapping()
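dax_insert_mapping() resolves the fault the other way: no page cache page at all, just the raw pfn of the block on the backing device. The vmf->pgoff >= size recheck at ref 291 guards against truncate racing with the fault. A condensed sketch of the core steps, assuming the ~v4.1 signatures of bdev_direct_access() and vm_insert_mixed(); locking and error paths are elided:

        sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
        unsigned long vaddr = (unsigned long)vmf->virtual_address;
        void *addr;
        unsigned long pfn;
        long avail;

        /* Ask the block driver for a direct mapping of this sector. */
        avail = bdev_direct_access(bh->b_bdev, sector, &addr, &pfn, bh->b_size);
        if (avail < 0)
                return avail;

        /* Freshly allocated or unwritten blocks contain stale data and
         * must be zeroed before being exposed to userspace. */
        if (buffer_unwritten(bh) || buffer_new(bh))
                clear_page(addr);

        /* Install the raw pfn into the page tables; no struct page. */
        error = vm_insert_mixed(vma, vaddr, pfn);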
315 static int do_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, in do_dax_fault() argument
323 unsigned long vaddr = (unsigned long)vmf->virtual_address; in do_dax_fault()
331 if (vmf->pgoff >= size) in do_dax_fault()
335 block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits); in do_dax_fault()
339 page = find_get_page(mapping, vmf->pgoff); in do_dax_fault()
341 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { in do_dax_fault()
351 if (unlikely(vmf->pgoff >= size)) { in do_dax_fault()
367 if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) { in do_dax_fault()
368 if (vmf->flags & FAULT_FLAG_WRITE) { in do_dax_fault()
378 return dax_load_hole(mapping, page, vmf); in do_dax_fault()
382 if (vmf->cow_page) { in do_dax_fault()
383 struct page *new_page = vmf->cow_page; in do_dax_fault()
390 vmf->page = page; in do_dax_fault()
396 if (vmf->pgoff >= size) { in do_dax_fault()
407 page = find_lock_page(mapping, vmf->pgoff); in do_dax_fault()
410 unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT, in do_dax_fault()
427 error = dax_insert_mapping(inode, &bh, vma, vmf); in do_dax_fault()
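Read together, the do_dax_fault() hits trace the handler's decision tree. A condensed, not-verbatim outline (locking, i_size rechecks, and error paths elided):

        if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
                if (vmf->flags & FAULT_FLAG_WRITE) {
                        /* write to a hole: allocate blocks now, i.e.
                         * get_block(inode, block, &bh, 1), then fall
                         * through to the direct-mapping path below */
                } else {
                        /* read of a hole: back it with a zero page */
                        return dax_load_hole(mapping, page, vmf);
                }
        }

        if (vmf->cow_page) {
                /* private-mapping write: copy the block contents (or
                 * zeros) into vmf->cow_page and return VM_FAULT_LOCKED
                 * so the generic fault code installs the copy */
        }

        /* Evict any hole page that previously covered this offset,
         * then map the real block's pfn. */
        unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT, PAGE_SIZE, 0);
        error = dax_insert_mapping(inode, &bh, vma, vmf);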
456 int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, in dax_fault() argument
462 if (vmf->flags & FAULT_FLAG_WRITE) { in dax_fault()
466 result = do_dax_fault(vma, vmf, get_block, complete_unwritten); in dax_fault()
467 if (vmf->flags & FAULT_FLAG_WRITE) in dax_fault()
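dax_fault() is the exported entry point; the FAULT_FLAG_WRITE checks at refs 462 and 467 bracket do_dax_fault() with write-side accounting (sb_start_pagefault()/sb_end_pagefault() plus file_update_time()). A filesystem wires it up through its vm_operations_struct, roughly as ext2 did in the same era (names follow fs/ext2/file.c; treat this as a sketch, not the verbatim tree):

static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        /* ext2 has no unwritten extents, so no completion callback. */
        return dax_fault(vma, vmf, ext2_get_block, NULL);
}

static const struct vm_operations_struct ext2_dax_vm_ops = {
        .fault          = ext2_dax_fault,
        .page_mkwrite   = ext2_dax_mkwrite,
        .pfn_mkwrite    = ext2_dax_pfn_mkwrite,
};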
480 int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) in dax_pfn_mkwrite() argument
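dax_pfn_mkwrite() serves a write fault on a pfn that is already mapped read-only. There is no struct page to dirty, so only the freeze and timestamp bookkeeping remains; a reconstruction consistent with the ~v4.1 helper (only the signature appears in this listing, so the body here is an assumption):

int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct super_block *sb = file_inode(vma->vm_file)->i_sb;

        sb_start_pagefault(sb);
        file_update_time(vma->vm_file);
        sb_end_pagefault(sb);
        return VM_FAULT_NOPAGE;
}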