vma                11 arch/alpha/include/asm/cacheflush.h #define flush_cache_range(vma, start, end)	do { } while (0)
vma                12 arch/alpha/include/asm/cacheflush.h #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
vma                51 arch/alpha/include/asm/cacheflush.h flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
vma                54 arch/alpha/include/asm/cacheflush.h 	if (vma->vm_flags & VM_EXEC) {
vma                55 arch/alpha/include/asm/cacheflush.h 		struct mm_struct *mm = vma->vm_mm;
vma                63 arch/alpha/include/asm/cacheflush.h extern void flush_icache_user_range(struct vm_area_struct *vma,
vma                68 arch/alpha/include/asm/cacheflush.h #define flush_icache_page(vma, page) \
vma                69 arch/alpha/include/asm/cacheflush.h   flush_icache_user_range((vma), (page), 0, 0)
vma                71 arch/alpha/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
vma                73 arch/alpha/include/asm/cacheflush.h      flush_icache_user_range(vma, page, vaddr, len); \
vma                75 arch/alpha/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
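
The alpha cacheflush.h lines above gate I-cache maintenance on VM_EXEC. A minimal sketch of the copy_to_user_page() shape they implement, assuming the era's kernel APIs: write the bytes, then flush the I-cache only for executable mappings. The helper name is illustrative; flush_icache_user_range() is the routine quoted above.

	static void copy_to_user_page_sketch(struct vm_area_struct *vma,
					     struct page *page, unsigned long vaddr,
					     void *dst, const void *src, int len)
	{
		memcpy(dst, src, len);			/* patch via the kernel alias */
		if (vma->vm_flags & VM_EXEC)		/* I-cache only matters for code */
			flush_icache_user_range(vma, page, vaddr, len);
	}
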
vma                79 arch/alpha/include/asm/machvec.h 					  struct vm_area_struct *vma,
vma                20 arch/alpha/include/asm/page.h #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
vma                21 arch/alpha/include/asm/page.h 	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vmaddr)
vma                91 arch/alpha/include/asm/pci.h 				      struct vm_area_struct *vma,
vma               329 arch/alpha/include/asm/pgtable.h extern inline void update_mmu_cache(struct vm_area_struct * vma,
vma                40 arch/alpha/include/asm/tlbflush.h 			   struct vm_area_struct *vma,
vma                44 arch/alpha/include/asm/tlbflush.h 	if (vma->vm_flags & VM_EXEC) {
vma                53 arch/alpha/include/asm/tlbflush.h 			   struct vm_area_struct *vma,
vma                56 arch/alpha/include/asm/tlbflush.h 	if (vma->vm_flags & VM_EXEC)
vma               118 arch/alpha/include/asm/tlbflush.h flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
vma               120 arch/alpha/include/asm/tlbflush.h 	struct mm_struct *mm = vma->vm_mm;
vma               123 arch/alpha/include/asm/tlbflush.h 		flush_tlb_current_page(mm, vma, addr);
vma               131 arch/alpha/include/asm/tlbflush.h flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma               134 arch/alpha/include/asm/tlbflush.h 	flush_tlb_mm(vma->vm_mm);
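
flush_tlb_page() above distinguishes the mm that is live on this CPU from a dormant one. A minimal sketch of that dispatch, reusing only helpers visible in this listing; alpha's real fallback invalidates the context lazily rather than calling flush_tlb_mm(), so treat the else branch as a labeled stand-in.

	static inline void flush_tlb_page_sketch(struct vm_area_struct *vma,
						 unsigned long addr)
	{
		struct mm_struct *mm = vma->vm_mm;

		if (mm == current->active_mm)
			flush_tlb_current_page(mm, vma, addr);	/* one entry, this CPU */
		else
			flush_tlb_mm(mm);	/* hedged stand-in for the lazy path */
	}
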
vma                26 arch/alpha/kernel/binfmt_loader.c 	loader = bprm->vma->vm_end - sizeof(void *);
vma                19 arch/alpha/kernel/pci-sysfs.c 				struct vm_area_struct *vma,
vma                29 arch/alpha/kernel/pci-sysfs.c 	vma->vm_pgoff += base >> PAGE_SHIFT;
vma                31 arch/alpha/kernel/pci-sysfs.c 	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma                32 arch/alpha/kernel/pci-sysfs.c 				  vma->vm_end - vma->vm_start,
vma                33 arch/alpha/kernel/pci-sysfs.c 				  vma->vm_page_prot);
vma                37 arch/alpha/kernel/pci-sysfs.c 			   struct vm_area_struct *vma, int sparse)
vma                42 arch/alpha/kernel/pci-sysfs.c 	nr = vma_pages(vma);
vma                43 arch/alpha/kernel/pci-sysfs.c 	start = vma->vm_pgoff;
vma                66 arch/alpha/kernel/pci-sysfs.c 			     struct vm_area_struct *vma, int sparse)
vma                83 arch/alpha/kernel/pci-sysfs.c 	if (!__pci_mmap_fits(pdev, i, vma, sparse))
vma                87 arch/alpha/kernel/pci-sysfs.c 	vma->vm_pgoff += bar.start >> (PAGE_SHIFT - (sparse ? 5 : 0));
vma                90 arch/alpha/kernel/pci-sysfs.c 	return hose_mmap_page_range(pdev->sysdata, vma, mmap_type, sparse);
vma                95 arch/alpha/kernel/pci-sysfs.c 				    struct vm_area_struct *vma)
vma                97 arch/alpha/kernel/pci-sysfs.c 	return pci_mmap_resource(kobj, attr, vma, 1);
vma               102 arch/alpha/kernel/pci-sysfs.c 				   struct vm_area_struct *vma)
vma               104 arch/alpha/kernel/pci-sysfs.c 	return pci_mmap_resource(kobj, attr, vma, 0);
vma               253 arch/alpha/kernel/pci-sysfs.c 			      struct vm_area_struct *vma,
vma               258 arch/alpha/kernel/pci-sysfs.c 	nr = vma_pages(vma);
vma               259 arch/alpha/kernel/pci-sysfs.c 	start = vma->vm_pgoff;
vma               282 arch/alpha/kernel/pci-sysfs.c int pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
vma               291 arch/alpha/kernel/pci-sysfs.c 	if (!__legacy_mmap_fits(hose, vma, res_size, sparse))
vma               294 arch/alpha/kernel/pci-sysfs.c 	return hose_mmap_page_range(hose, vma, mmap_type, sparse);
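
__pci_mmap_fits() and __legacy_mmap_fits() above both reduce to the same overflow-safe range check: the requested page window must lie inside the BAR. A sketch, with bar_size standing in for pci_resource_len() (an assumption, not the quoted code):

	static int mmap_fits_sketch(struct vm_area_struct *vma,
				    resource_size_t bar_size)
	{
		unsigned long nr = vma_pages(vma);	/* pages the user asked for */
		unsigned long start = vma->vm_pgoff;	/* first page into the BAR */
		unsigned long size = (bar_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

		/* written to avoid overflow of start + nr */
		return start < size && nr <= size - start;
	}
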
vma               674 arch/alpha/kernel/smp.c 	struct vm_area_struct *vma;
vma               686 arch/alpha/kernel/smp.c 		flush_tlb_current_page(mm, data->vma, data->addr);
vma               692 arch/alpha/kernel/smp.c flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
vma               695 arch/alpha/kernel/smp.c 	struct mm_struct *mm = vma->vm_mm;
vma               700 arch/alpha/kernel/smp.c 		flush_tlb_current_page(mm, vma, addr);
vma               714 arch/alpha/kernel/smp.c 	data.vma = vma;
vma               725 arch/alpha/kernel/smp.c flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
vma               728 arch/alpha/kernel/smp.c 	flush_tlb_mm(vma->vm_mm);
vma               743 arch/alpha/kernel/smp.c flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
vma               746 arch/alpha/kernel/smp.c 	struct mm_struct *mm = vma->vm_mm;
vma               748 arch/alpha/kernel/smp.c 	if ((vma->vm_flags & VM_EXEC) == 0)
vma                87 arch/alpha/mm/fault.c 	struct vm_area_struct * vma;
vma               121 arch/alpha/mm/fault.c 	vma = find_vma(mm, address);
vma               122 arch/alpha/mm/fault.c 	if (!vma)
vma               124 arch/alpha/mm/fault.c 	if (vma->vm_start <= address)
vma               126 arch/alpha/mm/fault.c 	if (!(vma->vm_flags & VM_GROWSDOWN))
vma               128 arch/alpha/mm/fault.c 	if (expand_stack(vma, address))
vma               136 arch/alpha/mm/fault.c 		if (!(vma->vm_flags & VM_EXEC))
vma               140 arch/alpha/mm/fault.c 		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
vma               143 arch/alpha/mm/fault.c 		if (!(vma->vm_flags & VM_WRITE))
vma               151 arch/alpha/mm/fault.c 	fault = handle_mm_fault(vma, address, flags);
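
The fault.c lines above walk the canonical arch fault path, and the same skeleton recurs below for arc, csky, hexagon, ia64 and m68k: look the vma up, grow the stack if the access falls just below a VM_GROWSDOWN vma, check permissions, then hand off to handle_mm_fault(). A condensed sketch under that era's 3-argument handle_mm_fault(); signal delivery and retry handling are elided, and the function name is illustrative.

	static vm_fault_t do_fault_sketch(struct mm_struct *mm, unsigned long address,
					  unsigned long required, unsigned int flags)
	{
		struct vm_area_struct *vma = find_vma(mm, address);

		if (!vma)
			return VM_FAULT_SIGSEGV;
		if (vma->vm_start > address &&			/* below the vma: */
		    (!(vma->vm_flags & VM_GROWSDOWN) ||		/* maybe stack growth */
		     expand_stack(vma, address)))
			return VM_FAULT_SIGSEGV;
		if (!(vma->vm_flags & required))		/* VM_READ/WRITE/EXEC */
			return VM_FAULT_SIGSEGV;
		return handle_mm_fault(vma, address & PAGE_MASK, flags);
	}
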
vma                27 arch/arc/include/asm/cacheflush.h #define flush_icache_page(vma, page)
vma                57 arch/arc/include/asm/cacheflush.h #define flush_cache_page(vma, u_vaddr, pfn)	/* PF handling/COW-break */
vma                63 arch/arc/include/asm/cacheflush.h void flush_cache_range(struct vm_area_struct *vma,
vma                65 arch/arc/include/asm/cacheflush.h void flush_cache_page(struct vm_area_struct *vma,
vma                73 arch/arc/include/asm/cacheflush.h void flush_anon_page(struct vm_area_struct *vma,
vma               107 arch/arc/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
vma               110 arch/arc/include/asm/cacheflush.h 	if (vma->vm_flags & VM_EXEC)					\
vma               114 arch/arc/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
vma                 9 arch/arc/include/asm/fb.h static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
vma                12 arch/arc/include/asm/fb.h 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma                59 arch/arc/include/asm/hugepage.h extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
vma                71 arch/arc/include/asm/hugepage.h extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma                21 arch/arc/include/asm/page.h 			unsigned long u_vaddr, struct vm_area_struct *vma);
vma               365 arch/arc/include/asm/pgtable.h void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
vma                13 arch/arc/include/asm/tlbflush.h void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
vma                15 arch/arc/include/asm/tlbflush.h void local_flush_tlb_range(struct vm_area_struct *vma,
vma                18 arch/arc/include/asm/tlbflush.h void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma                23 arch/arc/include/asm/tlbflush.h #define flush_tlb_range(vma, s, e)	local_flush_tlb_range(vma, s, e)
vma                24 arch/arc/include/asm/tlbflush.h #define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
vma                29 arch/arc/include/asm/tlbflush.h #define flush_pmd_tlb_range(vma, s, e)	local_flush_pmd_tlb_range(vma, s, e)
vma                32 arch/arc/include/asm/tlbflush.h extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma                34 arch/arc/include/asm/tlbflush.h extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
vma                39 arch/arc/include/asm/tlbflush.h extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
vma                18 arch/arc/kernel/arc_hostlink.c static int arc_hl_mmap(struct file *fp, struct vm_area_struct *vma)
vma                20 arch/arc/kernel/arc_hostlink.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma                22 arch/arc/kernel/arc_hostlink.c 	if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma                23 arch/arc/kernel/arc_hostlink.c 			       vma->vm_end - vma->vm_start,
vma                24 arch/arc/kernel/arc_hostlink.c 			       vma->vm_page_prot)) {
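
The arc_hl_mmap() fragment above is quoted with gaps; completed, the whole handler is just the uncached io_remap_pfn_range() idiom. A sketch rather than the file's exact text, with the usual return-value convention assumed:

	static int arc_hl_mmap_sketch(struct file *fp, struct vm_area_struct *vma)
	{
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;

		return 0;
	}
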
vma                86 arch/arc/kernel/troubleshoot.c 	struct vm_area_struct *vma;
vma                93 arch/arc/kernel/troubleshoot.c 	vma = find_vma(active_mm, address);
vma                98 arch/arc/kernel/troubleshoot.c 	if (vma && (vma->vm_start <= address)) {
vma               102 arch/arc/kernel/troubleshoot.c 		if (vma->vm_file) {
vma               103 arch/arc/kernel/troubleshoot.c 			nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1);
vma               109 arch/arc/kernel/troubleshoot.c 			vma->vm_start < TASK_UNMAPPED_BASE ?
vma               110 arch/arc/kernel/troubleshoot.c 				address : address - vma->vm_start,
vma               111 arch/arc/kernel/troubleshoot.c 			nm, vma->vm_start, vma->vm_end);
vma              1040 arch/arc/mm/cache.c void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
vma              1049 arch/arc/mm/cache.c 	if (vma->vm_flags & VM_EXEC)
vma              1053 arch/arc/mm/cache.c void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
vma              1059 arch/arc/mm/cache.c void flush_anon_page(struct vm_area_struct *vma, struct page *page,
vma              1072 arch/arc/mm/cache.c 	unsigned long u_vaddr, struct vm_area_struct *vma)
vma                63 arch/arc/mm/fault.c 	struct vm_area_struct *vma = NULL;
vma               106 arch/arc/mm/fault.c 	vma = find_vma(mm, address);
vma               107 arch/arc/mm/fault.c 	if (!vma)
vma               109 arch/arc/mm/fault.c 	if (unlikely(address < vma->vm_start)) {
vma               110 arch/arc/mm/fault.c 		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
vma               123 arch/arc/mm/fault.c 	if (!(vma->vm_flags & mask)) {
vma               128 arch/arc/mm/fault.c 	fault = handle_mm_fault(vma, address, flags);
vma                33 arch/arc/mm/mmap.c 	struct vm_area_struct *vma;
vma                63 arch/arc/mm/mmap.c 		vma = find_vma(mm, addr);
vma                65 arch/arc/mm/mmap.c 		    (!vma || addr + len <= vm_start_gap(vma)))
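
The find_vma()/vm_start_gap() pair above is the standard hint check in arch_get_unmapped_area(): honour a caller-supplied address only if the gap before the next vma holds the request. A sketch; the TASK_SIZE bound mirrors the generic code, and the cache-colouring checks arc also performs are omitted.

	static bool hint_fits_sketch(struct mm_struct *mm, unsigned long addr,
				     unsigned long len)
	{
		struct vm_area_struct *vma = find_vma(mm, addr);

		/* vm_start_gap() also accounts for a stack guard gap */
		return addr <= TASK_SIZE - len &&
		       (!vma || addr + len <= vm_start_gap(vma));
	}
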
vma               324 arch/arc/mm/tlb.c void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma               338 arch/arc/mm/tlb.c 		local_flush_tlb_mm(vma->vm_mm);
vma               351 arch/arc/mm/tlb.c 	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
vma               353 arch/arc/mm/tlb.c 			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
vma               398 arch/arc/mm/tlb.c void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
vma               408 arch/arc/mm/tlb.c 	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
vma               409 arch/arc/mm/tlb.c 		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
vma               465 arch/arc/mm/tlb.c void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
vma               468 arch/arc/mm/tlb.c 		.ta_vma = vma,
vma               472 arch/arc/mm/tlb.c 	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
vma               475 arch/arc/mm/tlb.c void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma               479 arch/arc/mm/tlb.c 		.ta_vma = vma,
vma               484 arch/arc/mm/tlb.c 	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
vma               488 arch/arc/mm/tlb.c void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma               492 arch/arc/mm/tlb.c 		.ta_vma = vma,
vma               497 arch/arc/mm/tlb.c 	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
vma               515 arch/arc/mm/tlb.c void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
vma               547 arch/arc/mm/tlb.c 	if (current->active_mm != vma->vm_mm)
vma               552 arch/arc/mm/tlb.c 	tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), vaddr);
vma               596 arch/arc/mm/tlb.c void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
vma               603 arch/arc/mm/tlb.c 	create_tlb(vma, vaddr, ptep);
vma               618 arch/arc/mm/tlb.c 	if ((vma->vm_flags & VM_EXEC) ||
vma               627 arch/arc/mm/tlb.c 			if (vma->vm_flags & VM_EXEC)
vma               656 arch/arc/mm/tlb.c void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
vma               660 arch/arc/mm/tlb.c 	update_mmu_cache(vma, addr, &pte);
vma               700 arch/arc/mm/tlb.c void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma               710 arch/arc/mm/tlb.c 	if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
vma               711 arch/arc/mm/tlb.c 		unsigned int asid = hw_pid(vma->vm_mm, cpu);
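
The flush_tlb_page()/flush_tlb_range() bodies in arc/mm/tlb.c above show the SMP shootdown pattern: pack the arguments in a struct and IPI every CPU in the mm's cpumask. A sketch with an illustrative args struct; ipi_flush_tlb_page() is the per-CPU callback named in the listing.

	struct tlb_args_sketch {
		struct vm_area_struct *ta_vma;
		unsigned long ta_start;
		unsigned long ta_end;
	};

	static void flush_tlb_page_smp_sketch(struct vm_area_struct *vma,
					      unsigned long uaddr)
	{
		struct tlb_args_sketch ta = {
			.ta_vma   = vma,
			.ta_start = uaddr,
		};

		/* run the local flush on every CPU that may cache this mm */
		on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
	}
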
vma               170 arch/arm/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vma               225 arch/arm/include/asm/cacheflush.h vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
vma               227 arch/arm/include/asm/cacheflush.h 	struct mm_struct *mm = vma->vm_mm;
vma               231 arch/arm/include/asm/cacheflush.h 					vma->vm_flags);
vma               235 arch/arm/include/asm/cacheflush.h vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
vma               237 arch/arm/include/asm/cacheflush.h 	struct mm_struct *mm = vma->vm_mm;
vma               241 arch/arm/include/asm/cacheflush.h 		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
vma               248 arch/arm/include/asm/cacheflush.h #define flush_cache_range(vma,start,end) \
vma               249 arch/arm/include/asm/cacheflush.h 		vivt_flush_cache_range(vma,start,end)
vma               250 arch/arm/include/asm/cacheflush.h #define flush_cache_page(vma,addr,pfn) \
vma               251 arch/arm/include/asm/cacheflush.h 		vivt_flush_cache_page(vma,addr,pfn)
vma               254 arch/arm/include/asm/cacheflush.h extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
vma               255 arch/arm/include/asm/cacheflush.h extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
vma               306 arch/arm/include/asm/cacheflush.h static inline void flush_anon_page(struct vm_area_struct *vma,
vma               309 arch/arm/include/asm/cacheflush.h 	extern void __flush_anon_page(struct vm_area_struct *vma,
vma               312 arch/arm/include/asm/cacheflush.h 		__flush_anon_page(vma, page, vmaddr);
vma               321 arch/arm/include/asm/cacheflush.h #define flush_icache_user_range(vma,page,addr,len) \
vma               328 arch/arm/include/asm/cacheflush.h #define flush_icache_page(vma,page)	do { } while (0)
vma               138 arch/arm/include/asm/dma-mapping.h extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
vma                 8 arch/arm/include/asm/fb.h static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
vma                11 arch/arm/include/asm/fb.h 	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma               113 arch/arm/include/asm/page.h 			unsigned long vaddr, struct vm_area_struct *vma);
vma               129 arch/arm/include/asm/page.h 			unsigned long vaddr, struct vm_area_struct *vma);
vma               136 arch/arm/include/asm/page.h #define copy_user_highpage(to,from,vaddr,vma)	\
vma               137 arch/arm/include/asm/page.h 	__cpu_copy_user_highpage(to, from, vaddr, vma)
vma               420 arch/arm/include/asm/tlbflush.h __local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
vma               425 arch/arm/include/asm/tlbflush.h 	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
vma               428 arch/arm/include/asm/tlbflush.h 	    cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
vma               442 arch/arm/include/asm/tlbflush.h local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
vma               446 arch/arm/include/asm/tlbflush.h 	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
vma               451 arch/arm/include/asm/tlbflush.h 	__local_flush_tlb_page(vma, uaddr);
vma               459 arch/arm/include/asm/tlbflush.h __flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
vma               463 arch/arm/include/asm/tlbflush.h 	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
vma               468 arch/arm/include/asm/tlbflush.h 	__local_flush_tlb_page(vma, uaddr);
vma               604 arch/arm/include/asm/tlbflush.h #define local_flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
vma               618 arch/arm/include/asm/tlbflush.h extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
vma               620 arch/arm/include/asm/tlbflush.h extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
vma               632 arch/arm/include/asm/tlbflush.h extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
vma               635 arch/arm/include/asm/tlbflush.h static inline void update_mmu_cache(struct vm_area_struct *vma,
vma               641 arch/arm/include/asm/tlbflush.h #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
vma               650 arch/arm/include/asm/tlbflush.h static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)			{ }
vma               652 arch/arm/include/asm/tlbflush.h static inline void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)	{ }
vma               658 arch/arm/include/asm/tlbflush.h extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
vma               660 arch/arm/include/asm/tlbflush.h extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
vma               356 arch/arm/kernel/process.c #define is_gate_vma(vma)	((vma) == &gate_vma)
vma               358 arch/arm/kernel/process.c #define is_gate_vma(vma)	0
vma               361 arch/arm/kernel/process.c const char *arch_vma_name(struct vm_area_struct *vma)
vma               363 arch/arm/kernel/process.c 	return is_gate_vma(vma) ? "[vectors]" : NULL;
vma               418 arch/arm/kernel/process.c 	struct vm_area_struct *vma;
vma               441 arch/arm/kernel/process.c 	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
vma               445 arch/arm/kernel/process.c 	if (IS_ERR(vma)) {
vma               446 arch/arm/kernel/process.c 		ret = PTR_ERR(vma);
vma               196 arch/arm/kernel/smp_tlb.c void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
vma               200 arch/arm/kernel/smp_tlb.c 		ta.ta_vma = vma;
vma               202 arch/arm/kernel/smp_tlb.c 		on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,
vma               205 arch/arm/kernel/smp_tlb.c 		__flush_tlb_page(vma, uaddr);
vma               206 arch/arm/kernel/smp_tlb.c 	broadcast_tlb_mm_a15_erratum(vma->vm_mm);
vma               220 arch/arm/kernel/smp_tlb.c void flush_tlb_range(struct vm_area_struct *vma,
vma               225 arch/arm/kernel/smp_tlb.c 		ta.ta_vma = vma;
vma               228 arch/arm/kernel/smp_tlb.c 		on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range,
vma               231 arch/arm/kernel/smp_tlb.c 		local_flush_tlb_range(vma, start, end);
vma               232 arch/arm/kernel/smp_tlb.c 	broadcast_tlb_mm_a15_erratum(vma->vm_mm);
vma               232 arch/arm/kernel/vdso.c 	struct vm_area_struct *vma;
vma               234 arch/arm/kernel/vdso.c 	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
vma               238 arch/arm/kernel/vdso.c 	return PTR_ERR_OR_ZERO(vma);
vma               244 arch/arm/kernel/vdso.c 	struct vm_area_struct *vma;
vma               259 arch/arm/kernel/vdso.c 	vma = _install_special_mapping(mm, addr, len,
vma               263 arch/arm/kernel/vdso.c 	if (!IS_ERR(vma))
vma               235 arch/arm/mach-integrator/impd1.c static int impd1fb_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
vma               239 arch/arm/mach-integrator/impd1.c 	start = vma->vm_pgoff + (fb->fb.fix.smem_start >> PAGE_SHIFT);
vma               240 arch/arm/mach-integrator/impd1.c 	size = vma->vm_end - vma->vm_start;
vma               242 arch/arm/mach-integrator/impd1.c 	return remap_pfn_range(vma, vma->vm_start, start, size,
vma               243 arch/arm/mach-integrator/impd1.c 			       vma->vm_page_prot);
vma               216 arch/arm/mach-rpc/ecard.c 	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, VM_EXEC);
vma               241 arch/arm/mach-rpc/ecard.c 	flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
vma               242 arch/arm/mach-rpc/ecard.c 	flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE);
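
TLB_FLUSH_VMA above (and again in arm64's tlb.h and hugetlbpage.c below) is the idiom for flushing a range that has no backing vma: flush_tlb_range() only reads vm_mm and vm_flags, so the caller fabricates a throwaway vma on the stack. A minimal sketch:

	static void flush_window_sketch(struct mm_struct *mm,
					unsigned long start, unsigned long end)
	{
		/* enough of a vma for flush_tlb_range() to read mm + flags */
		struct vm_area_struct vma = TLB_FLUSH_VMA(mm, VM_EXEC);

		flush_tlb_range(&vma, start, end);
	}
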
vma                39 arch/arm/mm/copypage-fa.c 	unsigned long vaddr, struct vm_area_struct *vma)
vma                66 arch/arm/mm/copypage-feroceon.c 	unsigned long vaddr, struct vm_area_struct *vma)
vma                72 arch/arm/mm/copypage-feroceon.c 	flush_cache_page(vma, vaddr, page_to_pfn(from));
vma                65 arch/arm/mm/copypage-v4mc.c 	unsigned long vaddr, struct vm_area_struct *vma)
vma                48 arch/arm/mm/copypage-v4wb.c 	unsigned long vaddr, struct vm_area_struct *vma)
vma                54 arch/arm/mm/copypage-v4wb.c 	flush_cache_page(vma, vaddr, page_to_pfn(from));
vma                44 arch/arm/mm/copypage-v4wt.c 	unsigned long vaddr, struct vm_area_struct *vma)
vma                31 arch/arm/mm/copypage-v6.c 	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
vma                70 arch/arm/mm/copypage-v6.c 	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
vma                64 arch/arm/mm/copypage-xsc3.c 	unsigned long vaddr, struct vm_area_struct *vma)
vma                70 arch/arm/mm/copypage-xsc3.c 	flush_cache_page(vma, vaddr, page_to_pfn(from));
vma                85 arch/arm/mm/copypage-xscale.c 	unsigned long vaddr, struct vm_area_struct *vma)
vma                63 arch/arm/mm/dma-mapping-nommu.c static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
vma                69 arch/arm/mm/dma-mapping-nommu.c 	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
vma                71 arch/arm/mm/dma-mapping-nommu.c 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
vma               205 arch/arm/mm/dma-mapping.c static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
vma               787 arch/arm/mm/dma-mapping.c static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
vma               792 arch/arm/mm/dma-mapping.c 	unsigned long nr_vma_pages = vma_pages(vma);
vma               795 arch/arm/mm/dma-mapping.c 	unsigned long off = vma->vm_pgoff;
vma               797 arch/arm/mm/dma-mapping.c 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
vma               801 arch/arm/mm/dma-mapping.c 		ret = remap_pfn_range(vma, vma->vm_start,
vma               803 arch/arm/mm/dma-mapping.c 				      vma->vm_end - vma->vm_start,
vma               804 arch/arm/mm/dma-mapping.c 				      vma->vm_page_prot);
vma               813 arch/arm/mm/dma-mapping.c static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
vma               817 arch/arm/mm/dma-mapping.c 	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
vma               820 arch/arm/mm/dma-mapping.c int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
vma               824 arch/arm/mm/dma-mapping.c 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
vma               825 arch/arm/mm/dma-mapping.c 	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
vma              1522 arch/arm/mm/dma-mapping.c static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
vma              1533 arch/arm/mm/dma-mapping.c 	if (vma->vm_pgoff >= nr_pages)
vma              1536 arch/arm/mm/dma-mapping.c 	err = vm_map_pages(vma, pages, nr_pages);
vma              1543 arch/arm/mm/dma-mapping.c 		struct vm_area_struct *vma, void *cpu_addr,
vma              1546 arch/arm/mm/dma-mapping.c 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
vma              1548 arch/arm/mm/dma-mapping.c 	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
vma              1552 arch/arm/mm/dma-mapping.c 		struct vm_area_struct *vma, void *cpu_addr,
vma              1555 arch/arm/mm/dma-mapping.c 	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
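
__arm_dma_mmap() above treats vm_pgoff as an offset into the DMA buffer and must reject mappings that overrun it. A sketch of that bounds-plus-remap logic; pfn abbreviates the page frame of the buffer start, which the real code derives from the DMA handle (an assumption here).

	static int dma_mmap_sketch(struct vm_area_struct *vma, unsigned long pfn,
				   size_t size)
	{
		unsigned long nr_vma_pages = vma_pages(vma);
		unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
		unsigned long off = vma->vm_pgoff;

		if (off >= nr_pages || nr_vma_pages > nr_pages - off)
			return -ENXIO;

		return remap_pfn_range(vma, vma->vm_start, pfn + off,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	}
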
vma                37 arch/arm/mm/fault-armv.c static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
vma                53 arch/arm/mm/fault-armv.c 		flush_cache_page(vma, address, pfn);
vma                58 arch/arm/mm/fault-armv.c 		set_pte_at(vma->vm_mm, address, ptep, entry);
vma                59 arch/arm/mm/fault-armv.c 		flush_tlb_page(vma, address);
vma                89 arch/arm/mm/fault-armv.c static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
vma                99 arch/arm/mm/fault-armv.c 	pgd = pgd_offset(vma->vm_mm, address);
vma               116 arch/arm/mm/fault-armv.c 	ptl = pte_lockptr(vma->vm_mm, pmd);
vma               120 arch/arm/mm/fault-armv.c 	ret = do_adjust_pte(vma, address, pfn, pte);
vma               129 arch/arm/mm/fault-armv.c make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
vma               132 arch/arm/mm/fault-armv.c 	struct mm_struct *mm = vma->vm_mm;
vma               138 arch/arm/mm/fault-armv.c 	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
vma               152 arch/arm/mm/fault-armv.c 		if (mpnt->vm_mm != mm || mpnt == vma)
vma               161 arch/arm/mm/fault-armv.c 		do_adjust_pte(vma, addr, pfn, ptep);
vma               177 arch/arm/mm/fault-armv.c void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
vma               200 arch/arm/mm/fault-armv.c 			make_coherent(mapping, vma, addr, ptep, pfn);
vma               201 arch/arm/mm/fault-armv.c 		else if (vma->vm_flags & VM_EXEC)
vma               190 arch/arm/mm/fault.c static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
vma               199 arch/arm/mm/fault.c 	return vma->vm_flags & mask ? false : true;
vma               206 arch/arm/mm/fault.c 	struct vm_area_struct *vma;
vma               209 arch/arm/mm/fault.c 	vma = find_vma(mm, addr);
vma               211 arch/arm/mm/fault.c 	if (unlikely(!vma))
vma               213 arch/arm/mm/fault.c 	if (unlikely(vma->vm_start > addr))
vma               221 arch/arm/mm/fault.c 	if (access_error(fsr, vma)) {
vma               226 arch/arm/mm/fault.c 	return handle_mm_fault(vma, addr & PAGE_MASK, flags);
vma               230 arch/arm/mm/fault.c 	if (vma->vm_flags & VM_GROWSDOWN &&
vma               231 arch/arm/mm/fault.c 	    addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
vma                79 arch/arm/mm/flush.c void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
vma                82 arch/arm/mm/flush.c 		vivt_flush_cache_range(vma, start, end);
vma                94 arch/arm/mm/flush.c 	if (vma->vm_flags & VM_EXEC)
vma                98 arch/arm/mm/flush.c void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
vma               101 arch/arm/mm/flush.c 		vivt_flush_cache_page(vma, user_addr, pfn);
vma               110 arch/arm/mm/flush.c 	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
vma               159 arch/arm/mm/flush.c void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
vma               163 arch/arm/mm/flush.c 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
vma               165 arch/arm/mm/flush.c 	if (vma->vm_flags & VM_EXEC)
vma               185 arch/arm/mm/flush.c void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
vma               193 arch/arm/mm/flush.c 	flush_ptrace_access(vma, page, uaddr, dst, len);
vma               390 arch/arm/mm/flush.c void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
vma               403 arch/arm/mm/flush.c 		flush_cache_page(vma, vmaddr, pfn);
vma                34 arch/arm/mm/mmap.c 	struct vm_area_struct *vma;
vma                65 arch/arm/mm/mmap.c 		vma = find_vma(mm, addr);
vma                67 arch/arm/mm/mmap.c 		    (!vma || addr + len <= vm_start_gap(vma)))
vma                85 arch/arm/mm/mmap.c 	struct vm_area_struct *vma;
vma               116 arch/arm/mm/mmap.c 		vma = find_vma(mm, addr);
vma               118 arch/arm/mm/mmap.c 				(!vma || addr + len <= vm_start_gap(vma)))
vma               175 arch/arm/mm/nommu.c void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
vma               180 arch/arm/mm/nommu.c 	if (vma->vm_flags & VM_EXEC)
vma                65 arch/arm/xen/enlighten.c int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
vma                68 arch/arm/xen/enlighten.c 	return xen_xlate_unmap_gfn_range(vma, nr, pages);
vma               102 arch/arm64/include/asm/cacheflush.h static inline void flush_cache_page(struct vm_area_struct *vma,
vma               107 arch/arm64/include/asm/cacheflush.h static inline void flush_cache_range(struct vm_area_struct *vma,
vma               126 arch/arm64/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vma               164 arch/arm64/include/asm/cacheflush.h #define flush_icache_page(vma,page)	do { } while (0)
vma                12 arch/arm64/include/asm/fb.h static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
vma                15 arch/arm64/include/asm/fb.h 	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma                37 arch/arm64/include/asm/hugetlb.h extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
vma                44 arch/arm64/include/asm/hugetlb.h extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
vma                54 arch/arm64/include/asm/hugetlb.h extern void huge_ptep_clear_flush(struct vm_area_struct *vma,
vma               675 arch/arm64/include/asm/pgtable.h extern int ptep_set_access_flags(struct vm_area_struct *vma,
vma               681 arch/arm64/include/asm/pgtable.h static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
vma               685 arch/arm64/include/asm/pgtable.h 	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
vma               718 arch/arm64/include/asm/pgtable.h static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
vma               726 arch/arm64/include/asm/pgtable.h static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
vma               729 arch/arm64/include/asm/pgtable.h 	int young = ptep_test_and_clear_young(vma, address, ptep);
vma               740 arch/arm64/include/asm/pgtable.h 		flush_tlb_page_nosync(vma, address);
vma               748 arch/arm64/include/asm/pgtable.h static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
vma               752 arch/arm64/include/asm/pgtable.h 	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
vma               805 arch/arm64/include/asm/pgtable.h static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
vma               846 arch/arm64/include/asm/pgtable.h static inline void update_mmu_cache(struct vm_area_struct *vma,
vma               856 arch/arm64/include/asm/pgtable.h #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
vma                26 arch/arm64/include/asm/tlb.h 	struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
vma                41 arch/arm64/include/asm/tlb.h 	__flush_tlb_range(&vma, tlb->start, tlb->end, stride, last_level);
vma               157 arch/arm64/include/asm/tlbflush.h static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
vma               160 arch/arm64/include/asm/tlbflush.h 	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
vma               167 arch/arm64/include/asm/tlbflush.h static inline void flush_tlb_page(struct vm_area_struct *vma,
vma               170 arch/arm64/include/asm/tlbflush.h 	flush_tlb_page_nosync(vma, uaddr);
vma               180 arch/arm64/include/asm/tlbflush.h static inline void __flush_tlb_range(struct vm_area_struct *vma,
vma               184 arch/arm64/include/asm/tlbflush.h 	unsigned long asid = ASID(vma->vm_mm);
vma               191 arch/arm64/include/asm/tlbflush.h 		flush_tlb_mm(vma->vm_mm);
vma               214 arch/arm64/include/asm/tlbflush.h static inline void flush_tlb_range(struct vm_area_struct *vma,
vma               221 arch/arm64/include/asm/tlbflush.h 	__flush_tlb_range(vma, start, end, PAGE_SIZE, false);
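
__flush_tlb_range() above embodies arm64's cost model: per-stride invalidation for short ranges, whole-ASID flush via flush_tlb_mm() once the range crosses a cutoff (MAX_TLBI_OPS in this era, an assumption not shown in the listing). A sketch; the per-entry tlbi sequence is inline asm and is left as a comment.

	static inline void flush_range_sketch(struct vm_area_struct *vma,
					      unsigned long start, unsigned long end,
					      unsigned long stride)
	{
		unsigned long addr;

		if ((end - start) >= (MAX_TLBI_OPS * stride)) {
			flush_tlb_mm(vma->vm_mm);	/* cheaper than many tlbis */
			return;
		}

		for (addr = start; addr < end; addr += stride) {
			/* __tlbi(vale1is, ...) per-entry invalidate goes here */
		}
	}
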
vma               197 arch/arm64/mm/fault.c int ptep_set_access_flags(struct vm_area_struct *vma,
vma               226 arch/arm64/mm/fault.c 	flush_tlb_fix_spurious_fault(vma, address);
vma               412 arch/arm64/mm/fault.c 	struct vm_area_struct *vma = find_vma(mm, addr);
vma               414 arch/arm64/mm/fault.c 	if (unlikely(!vma))
vma               421 arch/arm64/mm/fault.c 	if (unlikely(vma->vm_start > addr)) {
vma               422 arch/arm64/mm/fault.c 		if (!(vma->vm_flags & VM_GROWSDOWN))
vma               424 arch/arm64/mm/fault.c 		if (expand_stack(vma, addr))
vma               432 arch/arm64/mm/fault.c 	if (!(vma->vm_flags & vm_flags))
vma               434 arch/arm64/mm/fault.c 	return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags);
vma                33 arch/arm64/mm/flush.c static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
vma                37 arch/arm64/mm/flush.c 	if (vma->vm_flags & VM_EXEC)
vma                46 arch/arm64/mm/flush.c void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
vma                51 arch/arm64/mm/flush.c 	flush_ptrace_access(vma, page, uaddr, dst, len);
vma               143 arch/arm64/mm/hugetlbpage.c 		struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
vma               144 arch/arm64/mm/hugetlbpage.c 		flush_tlb_range(&vma, saddr, addr);
vma               164 arch/arm64/mm/hugetlbpage.c 	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
vma               170 arch/arm64/mm/hugetlbpage.c 	flush_tlb_range(&vma, saddr, addr);
vma               297 arch/arm64/mm/hugetlbpage.c pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
vma               300 arch/arm64/mm/hugetlbpage.c 	size_t pagesize = huge_page_size(hstate_vma(vma));
vma               369 arch/arm64/mm/hugetlbpage.c int huge_ptep_set_access_flags(struct vm_area_struct *vma,
vma               380 arch/arm64/mm/hugetlbpage.c 		return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
vma               382 arch/arm64/mm/hugetlbpage.c 	ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
vma               388 arch/arm64/mm/hugetlbpage.c 	orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
vma               399 arch/arm64/mm/hugetlbpage.c 		set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
vma               431 arch/arm64/mm/hugetlbpage.c void huge_ptep_clear_flush(struct vm_area_struct *vma,
vma               438 arch/arm64/mm/hugetlbpage.c 		ptep_clear_flush(vma, addr, ptep);
vma               442 arch/arm64/mm/hugetlbpage.c 	ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
vma               443 arch/arm64/mm/hugetlbpage.c 	clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
vma               541 arch/arm64/mm/mmu.c 				      pgprot_t prot, struct vm_struct *vma,
vma               556 arch/arm64/mm/mmu.c 	vma->addr	= va_start;
vma               557 arch/arm64/mm/mmu.c 	vma->phys_addr	= pa_start;
vma               558 arch/arm64/mm/mmu.c 	vma->size	= size;
vma               559 arch/arm64/mm/mmu.c 	vma->flags	= VM_MAP | vm_flags;
vma               560 arch/arm64/mm/mmu.c 	vma->caller	= __builtin_return_address(0);
vma               562 arch/arm64/mm/mmu.c 	vm_area_add_early(vma);
vma                26 arch/c6x/include/asm/cacheflush.h #define flush_cache_page(vma, vmaddr, pfn)	do {} while (0)
vma                43 arch/c6x/include/asm/cacheflush.h #define flush_icache_page(vma, page)					  \
vma                45 arch/c6x/include/asm/cacheflush.h 	if ((vma)->vm_flags & PROT_EXEC)				  \
vma                53 arch/c6x/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
vma                59 arch/c6x/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vma                36 arch/csky/abiv1/cacheflush.c void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
vma                53 arch/csky/abiv1/cacheflush.c 		if (vma->vm_flags & VM_EXEC)
vma                69 arch/csky/abiv1/cacheflush.c void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
vma                74 arch/csky/abiv1/cacheflush.c 	if (vma->vm_flags & VM_EXEC)
vma                15 arch/csky/abiv1/inc/abi/cacheflush.h #define flush_cache_page(vma, page, pfn)	cache_wbinv_all()
vma                34 arch/csky/abiv1/inc/abi/cacheflush.h static inline void flush_anon_page(struct vm_area_struct *vma,
vma                45 arch/csky/abiv1/inc/abi/cacheflush.h extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
vma                49 arch/csky/abiv1/inc/abi/cacheflush.h #define flush_icache_page(vma, page)		do {} while (0);
vma                52 arch/csky/abiv1/inc/abi/cacheflush.h #define flush_icache_user_range(vma,page,addr,len) \
vma                55 arch/csky/abiv1/inc/abi/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vma                60 arch/csky/abiv1/inc/abi/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
vma                29 arch/csky/abiv1/mmap.c 	struct vm_area_struct *vma;
vma                58 arch/csky/abiv1/mmap.c 		vma = find_vma(mm, addr);
vma                60 arch/csky/abiv1/mmap.c 		    (!vma || addr + len <= vm_start_gap(vma)))
vma                 9 arch/csky/abiv2/cacheflush.c void flush_icache_page(struct vm_area_struct *vma, struct page *page)
vma                20 arch/csky/abiv2/cacheflush.c void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
vma                32 arch/csky/abiv2/cacheflush.c void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
vma                17 arch/csky/abiv2/inc/abi/cacheflush.h #define flush_cache_range(vma, start, end) \
vma                19 arch/csky/abiv2/inc/abi/cacheflush.h 		if (vma->vm_flags & VM_EXEC) \
vma                23 arch/csky/abiv2/inc/abi/cacheflush.h #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
vma                31 arch/csky/abiv2/inc/abi/cacheflush.h void flush_icache_page(struct vm_area_struct *vma, struct page *page);
vma                32 arch/csky/abiv2/inc/abi/cacheflush.h void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
vma                38 arch/csky/abiv2/inc/abi/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
vma                43 arch/csky/abiv2/inc/abi/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vma               303 arch/csky/include/asm/pgtable.h void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
vma               309 arch/csky/include/asm/pgtable.h #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
vma               310 arch/csky/include/asm/pgtable.h 	remap_pfn_range(vma, vaddr, pfn, size, prot)
vma                 9 arch/csky/include/asm/tlb.h #define tlb_start_vma(tlb, vma) \
vma                12 arch/csky/include/asm/tlb.h 			flush_cache_range(vma, (vma)->vm_start, (vma)->vm_end); \
vma                15 arch/csky/include/asm/tlb.h #define tlb_end_vma(tlb, vma) \
vma                18 arch/csky/include/asm/tlb.h 			flush_tlb_range(vma, (vma)->vm_start, (vma)->vm_end); \
vma                18 arch/csky/include/asm/tlbflush.h extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
vma                19 arch/csky/include/asm/tlbflush.h extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma                77 arch/csky/kernel/vdso.c const char *arch_vma_name(struct vm_area_struct *vma)
vma                79 arch/csky/kernel/vdso.c 	if (vma->vm_mm == NULL)
vma                82 arch/csky/kernel/vdso.c 	if (vma->vm_start == (long)vma->vm_mm->context.vdso)
vma                49 arch/csky/mm/fault.c 	struct vm_area_struct *vma = NULL;
vma               120 arch/csky/mm/fault.c 	vma = find_vma(mm, address);
vma               121 arch/csky/mm/fault.c 	if (!vma)
vma               123 arch/csky/mm/fault.c 	if (vma->vm_start <= address)
vma               125 arch/csky/mm/fault.c 	if (!(vma->vm_flags & VM_GROWSDOWN))
vma               127 arch/csky/mm/fault.c 	if (expand_stack(vma, address))
vma               137 arch/csky/mm/fault.c 		if (!(vma->vm_flags & VM_WRITE))
vma               140 arch/csky/mm/fault.c 		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
vma               149 arch/csky/mm/fault.c 	fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0);
vma                47 arch/csky/mm/tlb.c void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma                50 arch/csky/mm/tlb.c 	unsigned long newpid = cpu_asid(vma->vm_mm);
vma               118 arch/csky/mm/tlb.c void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
vma               120 arch/csky/mm/tlb.c 	int newpid = cpu_asid(vma->vm_mm);
vma                31 arch/hexagon/include/asm/cacheflush.h #define flush_cache_range(vma, start, end)	do { } while (0)
vma                32 arch/hexagon/include/asm/cacheflush.h #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
vma                37 arch/hexagon/include/asm/cacheflush.h #define flush_icache_page(vma, pg)		do { } while (0)
vma                38 arch/hexagon/include/asm/cacheflush.h #define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)
vma                73 arch/hexagon/include/asm/cacheflush.h static inline void update_mmu_cache(struct vm_area_struct *vma,
vma                79 arch/hexagon/include/asm/cacheflush.h void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
vma                82 arch/hexagon/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vma                26 arch/hexagon/include/asm/tlbflush.h extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
vma                27 arch/hexagon/include/asm/tlbflush.h extern void flush_tlb_range(struct vm_area_struct *vma,
vma                83 arch/hexagon/kernel/vdso.c const char *arch_vma_name(struct vm_area_struct *vma)
vma                85 arch/hexagon/kernel/vdso.c 	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
vma               118 arch/hexagon/mm/cache.c void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
vma               122 arch/hexagon/mm/cache.c 	if (vma->vm_flags & VM_EXEC) {
vma                38 arch/hexagon/mm/vm_fault.c 	struct vm_area_struct *vma;
vma                59 arch/hexagon/mm/vm_fault.c 	vma = find_vma(mm, address);
vma                60 arch/hexagon/mm/vm_fault.c 	if (!vma)
vma                63 arch/hexagon/mm/vm_fault.c 	if (vma->vm_start <= address)
vma                66 arch/hexagon/mm/vm_fault.c 	if (!(vma->vm_flags & VM_GROWSDOWN))
vma                69 arch/hexagon/mm/vm_fault.c 	if (expand_stack(vma, address))
vma                78 arch/hexagon/mm/vm_fault.c 		if (!(vma->vm_flags & VM_EXEC))
vma                82 arch/hexagon/mm/vm_fault.c 		if (!(vma->vm_flags & VM_READ))
vma                86 arch/hexagon/mm/vm_fault.c 		if (!(vma->vm_flags & VM_WRITE))
vma                92 arch/hexagon/mm/vm_fault.c 	fault = handle_mm_fault(vma, address, flags);
vma                25 arch/hexagon/mm/vm_tlb.c void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma                28 arch/hexagon/mm/vm_tlb.c 	struct mm_struct *mm = vma->vm_mm;
vma                66 arch/hexagon/mm/vm_tlb.c void flush_tlb_page(struct vm_area_struct *vma, unsigned long vaddr)
vma                68 arch/hexagon/mm/vm_tlb.c 	struct mm_struct *mm = vma->vm_mm;
vma                23 arch/ia64/include/asm/cacheflush.h #define flush_cache_range(vma, start, end)	do { } while (0)
vma                24 arch/ia64/include/asm/cacheflush.h #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
vma                25 arch/ia64/include/asm/cacheflush.h #define flush_icache_page(vma,page)		do { } while (0)
vma                42 arch/ia64/include/asm/cacheflush.h #define flush_icache_user_range(vma, page, user_addr, len)					\
vma                48 arch/ia64/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
vma                50 arch/ia64/include/asm/cacheflush.h      flush_icache_user_range(vma, page, vaddr, len); \
vma                52 arch/ia64/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vma                10 arch/ia64/include/asm/fb.h static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
vma                13 arch/ia64/include/asm/fb.h 	if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
vma                14 arch/ia64/include/asm/fb.h 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma                16 arch/ia64/include/asm/fb.h 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma                25 arch/ia64/include/asm/hugetlb.h static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
vma                85 arch/ia64/include/asm/page.h #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr)		\
vma                88 arch/ia64/include/asm/page.h 		GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr);	\
vma                39 arch/ia64/include/asm/pci.h 				      struct vm_area_struct *vma,
vma               410 arch/ia64/include/asm/pgtable.h ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
vma               420 arch/ia64/include/asm/pgtable.h 	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
vma               459 arch/ia64/include/asm/pgtable.h #define update_mmu_cache(vma, address, ptep) do { } while (0)
vma                95 arch/ia64/include/asm/tlbflush.h extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
vma               101 arch/ia64/include/asm/tlbflush.h flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
vma               104 arch/ia64/include/asm/tlbflush.h 	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
vma               106 arch/ia64/include/asm/tlbflush.h 	if (vma->vm_mm == current->active_mm)
vma               109 arch/ia64/include/asm/tlbflush.h 		vma->vm_mm->context = 0;
vma              2174 arch/ia64/kernel/perfmon.c pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
vma              2182 arch/ia64/kernel/perfmon.c 		if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
vma              2199 arch/ia64/kernel/perfmon.c 	struct vm_area_struct *vma = NULL;
vma              2234 arch/ia64/kernel/perfmon.c 	vma = vm_area_alloc(mm);
vma              2235 arch/ia64/kernel/perfmon.c 	if (!vma) {
vma              2243 arch/ia64/kernel/perfmon.c 	vma->vm_file	     = get_file(filp);
vma              2244 arch/ia64/kernel/perfmon.c 	vma->vm_flags	     = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP;
vma              2245 arch/ia64/kernel/perfmon.c 	vma->vm_page_prot    = PAGE_READONLY; /* XXX may need to change */
vma              2264 arch/ia64/kernel/perfmon.c 	vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS);
vma              2265 arch/ia64/kernel/perfmon.c 	if (IS_ERR_VALUE(vma->vm_start)) {
vma              2270 arch/ia64/kernel/perfmon.c 	vma->vm_end = vma->vm_start + size;
vma              2271 arch/ia64/kernel/perfmon.c 	vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
vma              2273 arch/ia64/kernel/perfmon.c 	DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));
vma              2276 arch/ia64/kernel/perfmon.c 	if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
vma              2286 arch/ia64/kernel/perfmon.c 	insert_vm_struct(mm, vma);
vma              2288 arch/ia64/kernel/perfmon.c 	vm_stat_account(vma->vm_mm, vma->vm_flags, vma_pages(vma));
vma              2294 arch/ia64/kernel/perfmon.c 	ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
vma              2295 arch/ia64/kernel/perfmon.c 	*(unsigned long *)user_vaddr = vma->vm_start;
vma              2300 arch/ia64/kernel/perfmon.c 	vm_area_free(vma);
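
pfm_smpl_buffer_alloc() above is one of the few places a driver builds a vma by hand rather than going through mmap(). A condensed sketch of the sequence using only calls visible in the listing; locking, IS_ERR_VALUE checks on vm_start, and fput()-on-error unwinding are elided.

	static void *map_buffer_sketch(struct mm_struct *mm, struct file *filp,
				       unsigned long size)
	{
		struct vm_area_struct *vma = vm_area_alloc(mm);

		if (!vma)
			return NULL;

		vma->vm_file      = get_file(filp);
		vma->vm_flags     = VM_READ | VM_MAYREAD | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_page_prot = PAGE_READONLY;
		vma->vm_start     = get_unmapped_area(NULL, 0, size, 0,
						      MAP_PRIVATE | MAP_ANONYMOUS);
		vma->vm_end       = vma->vm_start + size;
		vma->vm_pgoff     = vma->vm_start >> PAGE_SHIFT;

		if (insert_vm_struct(mm, vma)) {	/* splice into the mm's tree */
			vm_area_free(vma);
			return NULL;
		}

		return (void *)vma->vm_start;
	}
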
vma                64 arch/ia64/mm/fault.c 	struct vm_area_struct *vma, *prev_vma;
vma               107 arch/ia64/mm/fault.c 	vma = find_vma_prev(mm, address, &prev_vma);
vma               108 arch/ia64/mm/fault.c 	if (!vma && !prev_vma )
vma               118 arch/ia64/mm/fault.c         if (( !vma && prev_vma ) || (address < vma->vm_start) )
vma               131 arch/ia64/mm/fault.c 	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
vma               134 arch/ia64/mm/fault.c 	if ((vma->vm_flags & mask) != mask)
vma               142 arch/ia64/mm/fault.c 	fault = handle_mm_fault(vma, address, flags);
vma               187 arch/ia64/mm/fault.c 		if (!vma)
vma               189 arch/ia64/mm/fault.c 		if (!(vma->vm_flags & VM_GROWSDOWN))
vma               191 arch/ia64/mm/fault.c 		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
vma               194 arch/ia64/mm/fault.c 		if (expand_stack(vma, address))
vma               197 arch/ia64/mm/fault.c 		vma = prev_vma;
vma               198 arch/ia64/mm/fault.c 		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
vma               205 arch/ia64/mm/fault.c 		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
vma               207 arch/ia64/mm/fault.c 		if (expand_upwards(vma, address))
vma               105 arch/ia64/mm/init.c 	struct vm_area_struct *vma;
vma               114 arch/ia64/mm/init.c 	vma = vm_area_alloc(current->mm);
vma               115 arch/ia64/mm/init.c 	if (vma) {
vma               116 arch/ia64/mm/init.c 		vma_set_anonymous(vma);
vma               117 arch/ia64/mm/init.c 		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
vma               118 arch/ia64/mm/init.c 		vma->vm_end = vma->vm_start + PAGE_SIZE;
vma               119 arch/ia64/mm/init.c 		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
vma               120 arch/ia64/mm/init.c 		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma               122 arch/ia64/mm/init.c 		if (insert_vm_struct(current->mm, vma)) {
vma               124 arch/ia64/mm/init.c 			vm_area_free(vma);
vma               132 arch/ia64/mm/init.c 		vma = vm_area_alloc(current->mm);
vma               133 arch/ia64/mm/init.c 		if (vma) {
vma               134 arch/ia64/mm/init.c 			vma_set_anonymous(vma);
vma               135 arch/ia64/mm/init.c 			vma->vm_end = PAGE_SIZE;
vma               136 arch/ia64/mm/init.c 			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
vma               137 arch/ia64/mm/init.c 			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
vma               140 arch/ia64/mm/init.c 			if (insert_vm_struct(current->mm, vma)) {
vma               142 arch/ia64/mm/init.c 				vm_area_free(vma);
vma               312 arch/ia64/mm/tlb.c __flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
vma               315 arch/ia64/mm/tlb.c 	struct mm_struct *mm = vma->vm_mm;
vma               350 arch/ia64/mm/tlb.c void flush_tlb_range(struct vm_area_struct *vma,
vma               363 arch/ia64/mm/tlb.c 		__flush_tlb_range(vma, start, end);
vma               365 arch/ia64/mm/tlb.c 		__flush_tlb_range(vma, ia64_thash(start), ia64_thash(end));
vma               439 arch/ia64/pci/pci.c pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
vma               442 arch/ia64/pci/pci.c 	unsigned long size = vma->vm_end - vma->vm_start;
vma               454 arch/ia64/pci/pci.c 	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
vma               456 arch/ia64/pci/pci.c 	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
vma               457 arch/ia64/pci/pci.c 				    vma->vm_page_prot);
vma               463 arch/ia64/pci/pci.c 	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
vma               464 arch/ia64/pci/pci.c 	vma->vm_page_prot = prot;
vma               466 arch/ia64/pci/pci.c 	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma               467 arch/ia64/pci/pci.c 			    size, vma->vm_page_prot))
vma               206 arch/m68k/include/asm/cacheflush_mm.h static inline void flush_cache_range(struct vm_area_struct *vma,
vma               210 arch/m68k/include/asm/cacheflush_mm.h 	if (vma->vm_mm == current->mm)
vma               214 arch/m68k/include/asm/cacheflush_mm.h static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
vma               216 arch/m68k/include/asm/cacheflush_mm.h 	if (vma->vm_mm == current->mm)
vma               255 arch/m68k/include/asm/cacheflush_mm.h #define flush_icache_page(vma, page)	__flush_page_to_ram(page_address(page))
vma               257 arch/m68k/include/asm/cacheflush_mm.h extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
vma               261 arch/m68k/include/asm/cacheflush_mm.h static inline void copy_to_user_page(struct vm_area_struct *vma,
vma               265 arch/m68k/include/asm/cacheflush_mm.h 	flush_cache_page(vma, vaddr, page_to_pfn(page));
vma               267 arch/m68k/include/asm/cacheflush_mm.h 	flush_icache_user_range(vma, page, vaddr, len);
vma               269 arch/m68k/include/asm/cacheflush_mm.h static inline void copy_from_user_page(struct vm_area_struct *vma,
vma               273 arch/m68k/include/asm/cacheflush_mm.h 	flush_cache_page(vma, vaddr, page_to_pfn(page));
vma                14 arch/m68k/include/asm/cacheflush_no.h #define flush_cache_range(vma, start, end)	do { } while (0)
vma                15 arch/m68k/include/asm/cacheflush_no.h #define flush_cache_page(vma, vmaddr)		do { } while (0)
vma                22 arch/m68k/include/asm/cacheflush_no.h #define flush_icache_page(vma,pg)		do { } while (0)
vma                23 arch/m68k/include/asm/cacheflush_no.h #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
vma                27 arch/m68k/include/asm/cacheflush_no.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
vma                29 arch/m68k/include/asm/cacheflush_no.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vma                12 arch/m68k/include/asm/fb.h static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
vma                15 arch/m68k/include/asm/fb.h 	pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE;
vma                18 arch/m68k/include/asm/fb.h static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
vma                22 arch/m68k/include/asm/fb.h 		pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE030;
vma                24 arch/m68k/include/asm/fb.h 		pgprot_val(vma->vm_page_prot) &= _CACHEMASK040;
vma                26 arch/m68k/include/asm/fb.h 		pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE_S;
vma                16 arch/m68k/include/asm/page_no.h #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
vma                17 arch/m68k/include/asm/page_no.h 	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
vma               132 arch/m68k/include/asm/pgtable_mm.h static inline void update_mmu_cache(struct vm_area_struct *vma,
vma                85 arch/m68k/include/asm/tlbflush.h static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
vma                87 arch/m68k/include/asm/tlbflush.h 	if (vma->vm_mm == current->active_mm) {
vma                95 arch/m68k/include/asm/tlbflush.h static inline void flush_tlb_range(struct vm_area_struct *vma,
vma                98 arch/m68k/include/asm/tlbflush.h 	if (vma->vm_mm == current->active_mm)
vma               171 arch/m68k/include/asm/tlbflush.h static inline void flush_tlb_page (struct vm_area_struct *vma,
vma               178 arch/m68k/include/asm/tlbflush.h 	sun3_put_context(vma->vm_mm->context);
vma               192 arch/m68k/include/asm/tlbflush.h static inline void flush_tlb_range (struct vm_area_struct *vma,
vma               195 arch/m68k/include/asm/tlbflush.h 	struct mm_struct *mm = vma->vm_mm;
vma               261 arch/m68k/include/asm/tlbflush.h static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
vma               392 arch/m68k/kernel/sys_m68k.c 		struct vm_area_struct *vma;
vma               403 arch/m68k/kernel/sys_m68k.c 		vma = find_vma(current->mm, addr);
vma               404 arch/m68k/kernel/sys_m68k.c 		if (!vma || addr < vma->vm_start || addr + len > vma->vm_end)
vma               109 arch/m68k/mm/cache.c void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
vma                72 arch/m68k/mm/fault.c 	struct vm_area_struct * vma;
vma                91 arch/m68k/mm/fault.c 	vma = find_vma(mm, address);
vma                92 arch/m68k/mm/fault.c 	if (!vma)
vma                94 arch/m68k/mm/fault.c 	if (vma->vm_flags & VM_IO)
vma                96 arch/m68k/mm/fault.c 	if (vma->vm_start <= address)
vma                98 arch/m68k/mm/fault.c 	if (!(vma->vm_flags & VM_GROWSDOWN))
vma               108 arch/m68k/mm/fault.c 	if (expand_stack(vma, address))
vma               121 arch/m68k/mm/fault.c 			if (!(vma->vm_flags & VM_WRITE))
vma               128 arch/m68k/mm/fault.c 			if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
vma               138 arch/m68k/mm/fault.c 	fault = handle_mm_fault(vma, address, flags);
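The m68k fault path above (and the microblaze, mips, nds32, nios2, openrisc and
parisc ones later in this listing) is the canonical page-fault shape: find_vma(),
a bounds/VM_GROWSDOWN check, expand_stack() for stack growth, an access-rights
test against vm_flags, then handle_mm_fault(). A condensed sketch, assuming the
caller holds mmap_sem for read and the pre-v5.8 three-argument handle_mm_fault():

    #include <linux/mm.h>

    static vm_fault_t demo_do_fault(struct mm_struct *mm, unsigned long address,
                                    bool is_write, unsigned int flags)
    {
            struct vm_area_struct *vma;

            vma = find_vma(mm, address);    /* first vma with vm_end > address */
            if (!vma)
                    return VM_FAULT_SIGSEGV;
            if (vma->vm_start > address) {
                    if (!(vma->vm_flags & VM_GROWSDOWN))
                            return VM_FAULT_SIGSEGV;
                    if (expand_stack(vma, address)) /* grow the stack vma down */
                            return VM_FAULT_SIGSEGV;
            }
            if (is_write && !(vma->vm_flags & VM_WRITE))
                    return VM_FAULT_SIGSEGV;        /* access-rights check */

            return handle_mm_fault(vma, address, flags);
    }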
vma                64 arch/microblaze/include/asm/cacheflush.h #define flush_icache_user_range(vma, pg, adr, len)	flush_icache();
vma                65 arch/microblaze/include/asm/cacheflush.h #define flush_icache_page(vma, pg)			do { } while (0)
vma                92 arch/microblaze/include/asm/cacheflush.h #define flush_cache_page(vma, vmaddr, pfn) \
vma                97 arch/microblaze/include/asm/cacheflush.h #define flush_cache_range(vma, start, len)	{	\
vma               103 arch/microblaze/include/asm/cacheflush.h #define flush_cache_range(vma, start, len) do { } while (0)
vma               105 arch/microblaze/include/asm/cacheflush.h static inline void copy_to_user_page(struct vm_area_struct *vma,
vma               111 arch/microblaze/include/asm/cacheflush.h 	if (vma->vm_flags & VM_EXEC) {
vma               117 arch/microblaze/include/asm/cacheflush.h static inline void copy_from_user_page(struct vm_area_struct *vma,
vma                56 arch/microblaze/include/asm/pci.h 				      struct vm_area_struct *vma,
vma               433 arch/microblaze/include/asm/pgtable.h static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
vma                33 arch/microblaze/include/asm/tlbflush.h static inline void local_flush_tlb_page(struct vm_area_struct *vma,
vma                36 arch/microblaze/include/asm/tlbflush.h static inline void local_flush_tlb_range(struct vm_area_struct *vma,
vma                42 arch/microblaze/include/asm/tlbflush.h #define update_mmu_cache(vma, addr, ptep)	do { } while (0)
vma                62 arch/microblaze/include/asm/tlbflush.h #define flush_tlb_page(vma, addr)		BUG()
vma                89 arch/microblaze/mm/fault.c 	struct vm_area_struct *vma;
vma               148 arch/microblaze/mm/fault.c 	vma = find_vma(mm, address);
vma               149 arch/microblaze/mm/fault.c 	if (unlikely(!vma))
vma               152 arch/microblaze/mm/fault.c 	if (vma->vm_start <= address)
vma               155 arch/microblaze/mm/fault.c 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
vma               170 arch/microblaze/mm/fault.c 	if (unlikely(address + 0x100000 < vma->vm_end)) {
vma               193 arch/microblaze/mm/fault.c 	if (expand_stack(vma, address))
vma               201 arch/microblaze/mm/fault.c 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
vma               209 arch/microblaze/mm/fault.c 		if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC))))
vma               218 arch/microblaze/mm/fault.c 	fault = handle_mm_fault(vma, address, flags);
vma               153 arch/microblaze/pci/pci-common.c int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
vma               164 arch/microblaze/pci/pci-common.c 	vma->vm_pgoff += (ioaddr + hose->io_base_phys) >> PAGE_SHIFT;
vma               304 arch/microblaze/pci/pci-common.c 			       struct vm_area_struct *vma,
vma               309 arch/microblaze/pci/pci-common.c 		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
vma               310 arch/microblaze/pci/pci-common.c 	resource_size_t size = vma->vm_end - vma->vm_start;
vma               334 arch/microblaze/pci/pci-common.c 			if (vma->vm_flags & VM_SHARED)
vma               335 arch/microblaze/pci/pci-common.c 				return shmem_zero_setup(vma);
vma               352 arch/microblaze/pci/pci-common.c 	vma->vm_pgoff = offset >> PAGE_SHIFT;
vma               353 arch/microblaze/pci/pci-common.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               354 arch/microblaze/pci/pci-common.c 	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma               355 arch/microblaze/pci/pci-common.c 			       vma->vm_end - vma->vm_start,
vma               356 arch/microblaze/pci/pci-common.c 			       vma->vm_page_prot);
vma                50 arch/mips/include/asm/cacheflush.h extern void (*flush_cache_range)(struct vm_area_struct *vma,
vma                52 arch/mips/include/asm/cacheflush.h extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
vma                69 arch/mips/include/asm/cacheflush.h static inline void flush_anon_page(struct vm_area_struct *vma,
vma                76 arch/mips/include/asm/cacheflush.h static inline void flush_icache_page(struct vm_area_struct *vma,
vma               104 arch/mips/include/asm/cacheflush.h extern void copy_to_user_page(struct vm_area_struct *vma,
vma               108 arch/mips/include/asm/cacheflush.h extern void copy_from_user_page(struct vm_area_struct *vma,
vma                 8 arch/mips/include/asm/fb.h static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
vma                11 arch/mips/include/asm/fb.h 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma                53 arch/mips/include/asm/hugetlb.h static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
vma                56 arch/mips/include/asm/hugetlb.h 	flush_tlb_page(vma, addr & huge_page_mask(hstate_vma(vma)));
vma                67 arch/mips/include/asm/hugetlb.h static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
vma                75 arch/mips/include/asm/hugetlb.h 		set_pte_at(vma->vm_mm, addr, ptep, pte);
vma                80 arch/mips/include/asm/hugetlb.h 		flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
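The mips hugetlb.h fragments show the huge_ptep_set_access_flags() contract:
write the updated PTE and flush only when it actually changed, and report back
whether anything changed so callers can skip redundant flushes. A sketch of that
shape, condensed from the fragments above rather than mips's exact body:

    static int demo_set_access_flags(struct vm_area_struct *vma,
                                     unsigned long addr, pte_t *ptep,
                                     pte_t pte, int dirty)
    {
            int changed = !pte_same(*ptep, pte);

            if (changed) {
                    set_pte_at(vma->vm_mm, addr, ptep, pte);
                    /* stale huge-page translations must go before reuse */
                    flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
            }
            return changed;
    }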
vma               115 arch/mips/include/asm/page.h 	unsigned long vaddr, struct vm_area_struct *vma);
vma               474 arch/mips/include/asm/pgtable.h extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
vma               477 arch/mips/include/asm/pgtable.h static inline void update_mmu_cache(struct vm_area_struct *vma,
vma               481 arch/mips/include/asm/pgtable.h 	__update_tlb(vma, address, pte);
vma               484 arch/mips/include/asm/pgtable.h static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
vma               489 arch/mips/include/asm/pgtable.h 	__update_tlb(vma, address, pte);
vma               495 arch/mips/include/asm/pgtable.h extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);
vma               497 arch/mips/include/asm/pgtable.h static inline int io_remap_pfn_range(struct vm_area_struct *vma,
vma               504 arch/mips/include/asm/pgtable.h 	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
vma                17 arch/mips/include/asm/tlbflush.h extern void local_flush_tlb_range(struct vm_area_struct *vma,
vma                21 arch/mips/include/asm/tlbflush.h extern void local_flush_tlb_page(struct vm_area_struct *vma,
vma                31 arch/mips/include/asm/tlbflush.h extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long,
vma                41 arch/mips/include/asm/tlbflush.h #define flush_tlb_range(vma, vmaddr, end)	local_flush_tlb_range(vma, vmaddr, end)
vma                44 arch/mips/include/asm/tlbflush.h #define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
vma               554 arch/mips/kernel/smp.c 	struct vm_area_struct *vma;
vma               563 arch/mips/kernel/smp.c 	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
vma               566 arch/mips/kernel/smp.c void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
vma               568 arch/mips/kernel/smp.c 	struct mm_struct *mm = vma->vm_mm;
vma               590 arch/mips/kernel/smp.c 			.vma = vma,
vma               596 arch/mips/kernel/smp.c 		local_flush_tlb_range(vma, start, end);
vma               599 arch/mips/kernel/smp.c 		int exec = vma->vm_flags & VM_EXEC;
vma               611 arch/mips/kernel/smp.c 		local_flush_tlb_range(vma, start, end);
vma               637 arch/mips/kernel/smp.c 	local_flush_tlb_page(fd->vma, fd->addr1);
vma               640 arch/mips/kernel/smp.c void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
vma               648 arch/mips/kernel/smp.c 		write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
vma               655 arch/mips/kernel/smp.c 	} else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
vma               656 arch/mips/kernel/smp.c 		   (current->mm != vma->vm_mm)) {
vma               658 arch/mips/kernel/smp.c 			.vma = vma,
vma               663 arch/mips/kernel/smp.c 		local_flush_tlb_page(vma, page);
vma               674 arch/mips/kernel/smp.c 			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
vma               675 arch/mips/kernel/smp.c 				set_cpu_context(cpu, vma->vm_mm, 1);
vma               677 arch/mips/kernel/smp.c 		local_flush_tlb_page(vma, page);
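flush_tlb_page() in arch/mips/kernel/smp.c, visible in fragments above, wraps the
local flush in an IPI broadcast: the arguments travel in a small struct, remote
CPUs run the local handler via smp_call_function(), and the initiating CPU flushes
itself last. A stripped-down sketch of that idiom; the struct and function names
here are illustrative:

    #include <linux/smp.h>
    #include <asm/tlbflush.h>

    struct demo_flush_data {        /* mirrors the fd->vma/fd->addr1 use above */
            struct vm_area_struct *vma;
            unsigned long addr1;
    };

    static void demo_ipi_flush_page(void *info)
    {
            struct demo_flush_data *fd = info;

            local_flush_tlb_page(fd->vma, fd->addr1);
    }

    static void demo_flush_tlb_page(struct vm_area_struct *vma,
                                    unsigned long page)
    {
            struct demo_flush_data fd = { .vma = vma, .addr1 = page };

            preempt_disable();
            smp_call_function(demo_ipi_flush_page, &fd, 1); /* wait for remotes */
            local_flush_tlb_page(vma, page);                /* then flush here */
            preempt_enable();
    }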
vma               742 arch/mips/kernel/traps.c 	struct vm_area_struct *vma;
vma               758 arch/mips/kernel/traps.c 		vma = find_vma(current->mm, (unsigned long)fault_addr);
vma               759 arch/mips/kernel/traps.c 		if (vma && (vma->vm_start <= (unsigned long)fault_addr))
vma                92 arch/mips/kernel/vdso.c 	struct vm_area_struct *vma;
vma               147 arch/mips/kernel/vdso.c 	vma = _install_special_mapping(mm, base, vvar_size,
vma               150 arch/mips/kernel/vdso.c 	if (IS_ERR(vma)) {
vma               151 arch/mips/kernel/vdso.c 		ret = PTR_ERR(vma);
vma               159 arch/mips/kernel/vdso.c 		ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size,
vma               166 arch/mips/kernel/vdso.c 	ret = remap_pfn_range(vma, data_addr,
vma               173 arch/mips/kernel/vdso.c 	vma = _install_special_mapping(mm, vdso_addr, image->size,
vma               177 arch/mips/kernel/vdso.c 	if (IS_ERR(vma)) {
vma               178 arch/mips/kernel/vdso.c 		ret = PTR_ERR(vma);
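The mips vdso setup (and nds32's below) relies on _install_special_mapping(),
which returns either the new vma or an ERR_PTR, after which data pages are
remapped into it. A sketch of that flow, mapping a single hypothetical data page:

    #include <linux/mm.h>

    static const struct vm_special_mapping demo_vvar_spec = {
            .name = "[vvar]",       /* shows up in /proc/<pid>/maps */
    };

    static int demo_map_vvar(struct mm_struct *mm, unsigned long base,
                             unsigned long data_pfn)
    {
            struct vm_area_struct *vma;

            vma = _install_special_mapping(mm, base, PAGE_SIZE,
                                           VM_READ | VM_MAYREAD,
                                           &demo_vvar_spec);
            if (IS_ERR(vma))
                    return PTR_ERR(vma);    /* ERR_PTR convention, as above */

            return remap_pfn_range(vma, base, data_pfn, PAGE_SIZE,
                                   vma->vm_page_prot);
    }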
vma                65 arch/mips/mm/c-octeon.c static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
vma                83 arch/mips/mm/c-octeon.c 	if (vma)
vma                84 arch/mips/mm/c-octeon.c 		mask = *mm_cpumask(vma->vm_mm);
vma               137 arch/mips/mm/c-octeon.c static void octeon_flush_cache_range(struct vm_area_struct *vma,
vma               140 arch/mips/mm/c-octeon.c 	if (vma->vm_flags & VM_EXEC)
vma               141 arch/mips/mm/c-octeon.c 		octeon_flush_icache_all_cores(vma);
vma               152 arch/mips/mm/c-octeon.c static void octeon_flush_cache_page(struct vm_area_struct *vma,
vma               155 arch/mips/mm/c-octeon.c 	if (vma->vm_flags & VM_EXEC)
vma               156 arch/mips/mm/c-octeon.c 		octeon_flush_icache_all_cores(vma);
vma               232 arch/mips/mm/c-r3k.c static void r3k_flush_cache_range(struct vm_area_struct *vma,
vma               237 arch/mips/mm/c-r3k.c static void r3k_flush_cache_page(struct vm_area_struct *vma,
vma               241 arch/mips/mm/c-r3k.c 	int exec = vma->vm_flags & VM_EXEC;
vma               242 arch/mips/mm/c-r3k.c 	struct mm_struct *mm = vma->vm_mm;
vma               578 arch/mips/mm/c-r4k.c 	struct vm_area_struct *vma = args;
vma               579 arch/mips/mm/c-r4k.c 	int exec = vma->vm_flags & VM_EXEC;
vma               581 arch/mips/mm/c-r4k.c 	if (!has_valid_asid(vma->vm_mm, R4K_INDEX))
vma               596 arch/mips/mm/c-r4k.c static void r4k_flush_cache_range(struct vm_area_struct *vma,
vma               599 arch/mips/mm/c-r4k.c 	int exec = vma->vm_flags & VM_EXEC;
vma               602 arch/mips/mm/c-r4k.c 		r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_range, vma);
vma               638 arch/mips/mm/c-r4k.c 	struct vm_area_struct *vma;
vma               646 arch/mips/mm/c-r4k.c 	struct vm_area_struct *vma = fcp_args->vma;
vma               649 arch/mips/mm/c-r4k.c 	int exec = vma->vm_flags & VM_EXEC;
vma               650 arch/mips/mm/c-r4k.c 	struct mm_struct *mm = vma->vm_mm;
vma               717 arch/mips/mm/c-r4k.c static void r4k_flush_cache_page(struct vm_area_struct *vma,
vma               722 arch/mips/mm/c-r4k.c 	args.vma = vma;
vma               157 arch/mips/mm/c-tx39.c static void tx39_flush_cache_range(struct vm_area_struct *vma,
vma               162 arch/mips/mm/c-tx39.c 	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
vma               168 arch/mips/mm/c-tx39.c static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
vma               170 arch/mips/mm/c-tx39.c 	int exec = vma->vm_flags & VM_EXEC;
vma               171 arch/mips/mm/c-tx39.c 	struct mm_struct *mm = vma->vm_mm;
vma                30 arch/mips/mm/cache.c void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
vma                32 arch/mips/mm/cache.c void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
vma                41 arch/mips/mm/fault.c 	struct vm_area_struct * vma = NULL;
vma               101 arch/mips/mm/fault.c 	vma = find_vma(mm, address);
vma               102 arch/mips/mm/fault.c 	if (!vma)
vma               104 arch/mips/mm/fault.c 	if (vma->vm_start <= address)
vma               106 arch/mips/mm/fault.c 	if (!(vma->vm_flags & VM_GROWSDOWN))
vma               108 arch/mips/mm/fault.c 	if (expand_stack(vma, address))
vma               118 arch/mips/mm/fault.c 		if (!(vma->vm_flags & VM_WRITE))
vma               123 arch/mips/mm/fault.c 			if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
vma               133 arch/mips/mm/fault.c 			if (!(vma->vm_flags & VM_READ) &&
vma               145 arch/mips/mm/fault.c 			if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
vma               155 arch/mips/mm/fault.c 	fault = handle_mm_fault(vma, address, flags);
vma               172 arch/mips/mm/init.c 	unsigned long vaddr, struct vm_area_struct *vma)
vma               195 arch/mips/mm/init.c void copy_to_user_page(struct vm_area_struct *vma,
vma               209 arch/mips/mm/init.c 	if (vma->vm_flags & VM_EXEC)
vma               210 arch/mips/mm/init.c 		flush_cache_page(vma, vaddr, page_to_pfn(page));
vma               213 arch/mips/mm/init.c void copy_from_user_page(struct vm_area_struct *vma,
vma                34 arch/mips/mm/mmap.c 	struct vm_area_struct *vma;
vma                68 arch/mips/mm/mmap.c 		vma = find_vma(mm, addr);
vma                70 arch/mips/mm/mmap.c 		    (!vma || addr + len <= vm_start_gap(vma)))
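arch/mips/mm/mmap.c here (and the parisc sys_parisc.c hunks further down)
implement the same hint check inside arch_get_unmapped_area(): find the vma
following the hinted address and accept the hint when the request ends at or
before vm_start_gap() of that vma. Roughly, with a hypothetical helper name:

    static unsigned long demo_check_hint(struct mm_struct *mm,
                                         unsigned long addr, unsigned long len)
    {
            struct vm_area_struct *vma;

            addr = PAGE_ALIGN(addr);
            vma = find_vma(mm, addr);       /* first vma with vm_end > addr */
            if (TASK_SIZE - len >= addr &&
                (!vma || addr + len <= vm_start_gap(vma)))
                    return addr;            /* hint fits below the next vma */
            return 0;                       /* caller falls back to gap search */
    }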
vma                70 arch/mips/mm/tlb-r3k.c void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma                74 arch/mips/mm/tlb-r3k.c 	struct mm_struct *mm = vma->vm_mm;
vma               150 arch/mips/mm/tlb-r3k.c void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
vma               155 arch/mips/mm/tlb-r3k.c 	if (cpu_context(cpu, vma->vm_mm) != 0) {
vma               160 arch/mips/mm/tlb-r3k.c 		printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
vma               162 arch/mips/mm/tlb-r3k.c 		newpid = cpu_context(cpu, vma->vm_mm) & asid_mask;
vma               182 arch/mips/mm/tlb-r3k.c void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
vma               191 arch/mips/mm/tlb-r3k.c 	if (current->active_mm != vma->vm_mm)
vma               197 arch/mips/mm/tlb-r3k.c 	if ((pid != (cpu_context(cpu, vma->vm_mm) & asid_mask)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
vma               199 arch/mips/mm/tlb-r3k.c 		       (cpu_context(cpu, vma->vm_mm)), pid);
vma                49 arch/mips/mm/tlb-r4k.c static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
vma                51 arch/mips/mm/tlb-r4k.c 	if (vma->vm_flags & VM_EXEC)
vma               107 arch/mips/mm/tlb-r4k.c void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma               110 arch/mips/mm/tlb-r4k.c 	struct mm_struct *mm = vma->vm_mm;
vma               212 arch/mips/mm/tlb-r4k.c void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
vma               216 arch/mips/mm/tlb-r4k.c 	if (cpu_context(cpu, vma->vm_mm) != 0) {
vma               228 arch/mips/mm/tlb-r4k.c 			write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
vma               230 arch/mips/mm/tlb-r4k.c 			write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
vma               251 arch/mips/mm/tlb-r4k.c 		flush_micro_tlb_vm(vma);
vma               294 arch/mips/mm/tlb-r4k.c void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
vma               306 arch/mips/mm/tlb-r4k.c 	if (current->active_mm != vma->vm_mm)
vma               319 arch/mips/mm/tlb-r4k.c 	pgdp = pgd_offset(vma->vm_mm, address);
vma               374 arch/mips/mm/tlb-r4k.c 	flush_micro_tlb_vm(vma);
vma                14 arch/nds32/include/asm/cacheflush.h void flush_icache_page(struct vm_area_struct *vma, struct page *page);
vma                20 arch/nds32/include/asm/cacheflush.h void flush_cache_range(struct vm_area_struct *vma,
vma                22 arch/nds32/include/asm/cacheflush.h void flush_cache_page(struct vm_area_struct *vma,
vma                30 arch/nds32/include/asm/cacheflush.h void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
vma                32 arch/nds32/include/asm/cacheflush.h void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
vma                36 arch/nds32/include/asm/cacheflush.h void flush_anon_page(struct vm_area_struct *vma,
vma                47 arch/nds32/include/asm/cacheflush.h void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
vma                27 arch/nds32/include/asm/page.h 			       unsigned long vaddr, struct vm_area_struct *vma);
vma                34 arch/nds32/include/asm/proc-fns.h extern void cpu_cache_wbinval_range_check(struct vm_area_struct *vma,
vma                33 arch/nds32/include/asm/tlbflush.h void local_flush_tlb_range(struct vm_area_struct *vma,
vma                35 arch/nds32/include/asm/tlbflush.h void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
vma                43 arch/nds32/include/asm/tlbflush.h void update_mmu_cache(struct vm_area_struct *vma,
vma                31 arch/nds32/kernel/sys_nds32.c 	struct vm_area_struct *vma;
vma                34 arch/nds32/kernel/sys_nds32.c 	vma = find_vma(current->mm, start);
vma                35 arch/nds32/kernel/sys_nds32.c 	if (!vma)
vma                49 arch/nds32/kernel/sys_nds32.c 	cpu_cache_wbinval_range_check(vma, start, end, flushi, wbd);
vma               118 arch/nds32/kernel/vdso.c 	struct vm_area_struct *vma;
vma               154 arch/nds32/kernel/vdso.c 	vma = _install_special_mapping(mm, vdso_base, vvar_page_num * PAGE_SIZE,
vma               156 arch/nds32/kernel/vdso.c 	if (IS_ERR(vma)) {
vma               157 arch/nds32/kernel/vdso.c 		ret = PTR_ERR(vma);
vma               162 arch/nds32/kernel/vdso.c 	ret = io_remap_pfn_range(vma, vdso_base,
vma               164 arch/nds32/kernel/vdso.c 				 PAGE_SIZE, vma->vm_page_prot);
vma               171 arch/nds32/kernel/vdso.c 	ret = io_remap_pfn_range(vma, vdso_base, timer_mapping_base >> PAGE_SHIFT,
vma               179 arch/nds32/kernel/vdso.c 	vma = _install_special_mapping(mm, vdso_base, vdso_text_len,
vma               183 arch/nds32/kernel/vdso.c 	if (IS_ERR(vma)) {
vma               184 arch/nds32/kernel/vdso.c 		ret = PTR_ERR(vma);
vma                28 arch/nds32/mm/cacheflush.c void flush_icache_page(struct vm_area_struct *vma, struct page *page)
vma                34 arch/nds32/mm/cacheflush.c 	cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
vma                40 arch/nds32/mm/cacheflush.c void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
vma                49 arch/nds32/mm/cacheflush.c void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
vma                59 arch/nds32/mm/cacheflush.c 	if (vma->vm_mm == current->active_mm) {
vma                69 arch/nds32/mm/cacheflush.c 	    (vma->vm_flags & VM_EXEC)) {
vma                73 arch/nds32/mm/cacheflush.c 		cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
vma               133 arch/nds32/mm/cacheflush.c void flush_cache_range(struct vm_area_struct *vma,
vma               140 arch/nds32/mm/cacheflush.c 		if (vma->vm_flags & VM_EXEC)
vma               146 arch/nds32/mm/cacheflush.c 		if (va_present(vma->vm_mm, start))
vma               147 arch/nds32/mm/cacheflush.c 			cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC);
vma               154 arch/nds32/mm/cacheflush.c void flush_cache_page(struct vm_area_struct *vma,
vma               161 arch/nds32/mm/cacheflush.c 	cpu_cache_wbinval_page(vto, vma->vm_flags & VM_EXEC);
vma               198 arch/nds32/mm/cacheflush.c 			unsigned long vaddr, struct vm_area_struct *vma)
vma               266 arch/nds32/mm/cacheflush.c void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
vma               275 arch/nds32/mm/cacheflush.c 	if (vma->vm_flags & VM_EXEC) {
vma               287 arch/nds32/mm/cacheflush.c void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
vma               300 arch/nds32/mm/cacheflush.c void flush_anon_page(struct vm_area_struct *vma,
vma               307 arch/nds32/mm/cacheflush.c 	if (vma->vm_mm != current->active_mm)
vma               311 arch/nds32/mm/cacheflush.c 	if (vma->vm_flags & VM_EXEC)
vma                75 arch/nds32/mm/fault.c 	struct vm_area_struct *vma;
vma               145 arch/nds32/mm/fault.c 	vma = find_vma(mm, addr);
vma               147 arch/nds32/mm/fault.c 	if (unlikely(!vma))
vma               150 arch/nds32/mm/fault.c 	if (vma->vm_start <= addr)
vma               153 arch/nds32/mm/fault.c 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
vma               156 arch/nds32/mm/fault.c 	if (unlikely(expand_stack(vma, addr)))
vma               197 arch/nds32/mm/fault.c 	if (!(vma->vm_flags & mask))
vma               206 arch/nds32/mm/fault.c 	fault = handle_mm_fault(vma, addr, flags);
vma                27 arch/nds32/mm/mmap.c 	struct vm_area_struct *vma;
vma                60 arch/nds32/mm/mmap.c 		vma = find_vma(mm, addr);
vma                62 arch/nds32/mm/mmap.c 		    (!vma || addr + len <= vma->vm_start))
vma               348 arch/nds32/mm/proc.c void cpu_cache_wbinval_range_check(struct vm_area_struct *vma,
vma               372 arch/nds32/mm/proc.c 		if (va_present(vma->vm_mm, start)) {
vma               381 arch/nds32/mm/proc.c 	if (va_present(vma->vm_mm, start)) {
vma               388 arch/nds32/mm/proc.c 	if (va_present(vma->vm_mm, end - 1)) {
vma               396 arch/nds32/mm/proc.c 		if (va_present(vma->vm_mm, t_start)) {
vma                14 arch/nds32/mm/tlb.c void local_flush_tlb_range(struct vm_area_struct *vma,
vma                27 arch/nds32/mm/tlb.c 	ncid = (ocid & ~TLB_MISC_mskCID) | vma->vm_mm->context.id;
vma                38 arch/nds32/mm/tlb.c void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
vma                44 arch/nds32/mm/tlb.c 	ncid = (ocid & ~TLB_MISC_mskCID) | vma->vm_mm->context.id;
vma                26 arch/nios2/include/asm/cacheflush.h extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
vma                28 arch/nios2/include/asm/cacheflush.h extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
vma                34 arch/nios2/include/asm/cacheflush.h extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
vma                39 arch/nios2/include/asm/cacheflush.h extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
vma                42 arch/nios2/include/asm/cacheflush.h extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
vma               297 arch/nios2/include/asm/pgtable.h extern void update_mmu_cache(struct vm_area_struct *vma,
vma                26 arch/nios2/include/asm/tlbflush.h extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma                30 arch/nios2/include/asm/tlbflush.h static inline void flush_tlb_page(struct vm_area_struct *vma,
vma                33 arch/nios2/include/asm/tlbflush.h 	flush_tlb_range(vma, address, address + PAGE_SIZE);
vma                41 arch/nios2/include/asm/tlbflush.h extern void reload_tlb_page(struct vm_area_struct *vma, unsigned long addr,
vma                24 arch/nios2/kernel/sys_nios2.c 	struct vm_area_struct *vma;
vma                41 arch/nios2/kernel/sys_nios2.c 	vma = find_vma(current->mm, addr);
vma                42 arch/nios2/kernel/sys_nios2.c 	if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
vma                45 arch/nios2/kernel/sys_nios2.c 	flush_cache_range(vma, addr, addr + len);
vma               132 arch/nios2/mm/cacheflush.c void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
vma               136 arch/nios2/mm/cacheflush.c 	if (vma == NULL || (vma->vm_flags & VM_EXEC))
vma               140 arch/nios2/mm/cacheflush.c void flush_icache_page(struct vm_area_struct *vma, struct page *page)
vma               149 arch/nios2/mm/cacheflush.c void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
vma               156 arch/nios2/mm/cacheflush.c 	if (vma->vm_flags & VM_EXEC)
vma               200 arch/nios2/mm/cacheflush.c void update_mmu_cache(struct vm_area_struct *vma,
vma               208 arch/nios2/mm/cacheflush.c 	reload_tlb_page(vma, address, pte);
vma               228 arch/nios2/mm/cacheflush.c 		if (vma->vm_flags & VM_EXEC)
vma               229 arch/nios2/mm/cacheflush.c 			flush_icache_page(vma, page);
vma               252 arch/nios2/mm/cacheflush.c void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
vma               256 arch/nios2/mm/cacheflush.c 	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
vma               259 arch/nios2/mm/cacheflush.c 	if (vma->vm_flags & VM_EXEC)
vma               263 arch/nios2/mm/cacheflush.c void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
vma               267 arch/nios2/mm/cacheflush.c 	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
vma               270 arch/nios2/mm/cacheflush.c 	if (vma->vm_flags & VM_EXEC)
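The nios2 copy_{to,from}_user_page() bodies bracket the memcpy with cache
maintenance: flush the user page before copying, and for executable vmas push the
new bytes out to the icache afterwards. A sketch of the copy_to_user_page() side,
assuming the generic flush_icache_range() helper:

    #include <linux/mm.h>
    #include <linux/string.h>
    #include <asm/cacheflush.h>

    static void demo_copy_to_user_page(struct vm_area_struct *vma,
                                       struct page *page,
                                       unsigned long user_vaddr,
                                       void *dst, void *src, int len)
    {
            flush_cache_page(vma, user_vaddr, page_to_pfn(page)); /* pre-copy */
            memcpy(dst, src, len);
            if (vma->vm_flags & VM_EXEC)    /* new code must reach the icache */
                    flush_icache_range((unsigned long)dst,
                                       (unsigned long)dst + len);
    }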
vma                45 arch/nios2/mm/fault.c 	struct vm_area_struct *vma = NULL;
vma                93 arch/nios2/mm/fault.c 	vma = find_vma(mm, address);
vma                94 arch/nios2/mm/fault.c 	if (!vma)
vma                96 arch/nios2/mm/fault.c 	if (vma->vm_start <= address)
vma                98 arch/nios2/mm/fault.c 	if (!(vma->vm_flags & VM_GROWSDOWN))
vma               100 arch/nios2/mm/fault.c 	if (expand_stack(vma, address))
vma               115 arch/nios2/mm/fault.c 		if (!(vma->vm_flags & VM_EXEC))
vma               119 arch/nios2/mm/fault.c 		if (!(vma->vm_flags & VM_READ))
vma               123 arch/nios2/mm/fault.c 		if (!(vma->vm_flags & VM_WRITE))
vma               134 arch/nios2/mm/fault.c 	fault = handle_mm_fault(vma, address, flags);
vma               127 arch/nios2/mm/init.c const char *arch_vma_name(struct vm_area_struct *vma)
vma               129 arch/nios2/mm/init.c 	return (vma->vm_start == KUSER_BASE) ? "[kuser]" : NULL;
vma               103 arch/nios2/mm/tlb.c void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma               106 arch/nios2/mm/tlb.c 	unsigned long mmu_pid = get_pid_from_context(&vma->vm_mm->context);
vma               114 arch/nios2/mm/tlb.c void reload_tlb_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
vma               116 arch/nios2/mm/tlb.c 	unsigned long mmu_pid = get_pid_from_context(&vma->vm_mm->context);
vma                72 arch/openrisc/include/asm/cacheflush.h #define flush_cache_range(vma, start, end)		do { } while (0)
vma                73 arch/openrisc/include/asm/cacheflush.h #define flush_cache_page(vma, vmaddr, pfn)		do { } while (0)
vma                77 arch/openrisc/include/asm/cacheflush.h #define flush_icache_page(vma, pg)			do { } while (0)
vma                78 arch/openrisc/include/asm/cacheflush.h #define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)
vma                82 arch/openrisc/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len)           \
vma                85 arch/openrisc/include/asm/cacheflush.h 		if (vma->vm_flags & VM_EXEC)                         \
vma                89 arch/openrisc/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len)         \
vma               415 arch/openrisc/include/asm/pgtable.h static inline void update_tlb(struct vm_area_struct *vma,
vma               420 arch/openrisc/include/asm/pgtable.h extern void update_cache(struct vm_area_struct *vma,
vma               423 arch/openrisc/include/asm/pgtable.h static inline void update_mmu_cache(struct vm_area_struct *vma,
vma               426 arch/openrisc/include/asm/pgtable.h 	update_tlb(vma, address, pte);
vma               427 arch/openrisc/include/asm/pgtable.h 	update_cache(vma, address, pte);
vma                34 arch/openrisc/include/asm/tlbflush.h extern void local_flush_tlb_page(struct vm_area_struct *vma,
vma                36 arch/openrisc/include/asm/tlbflush.h extern void local_flush_tlb_range(struct vm_area_struct *vma,
vma                48 arch/openrisc/include/asm/tlbflush.h extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
vma                49 arch/openrisc/include/asm/tlbflush.h extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma               236 arch/openrisc/kernel/smp.c void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
vma               241 arch/openrisc/kernel/smp.c void flush_tlb_range(struct vm_area_struct *vma,
vma                42 arch/openrisc/mm/cache.c void update_cache(struct vm_area_struct *vma, unsigned long address,
vma                54 arch/openrisc/mm/cache.c 	if ((vma->vm_flags & VM_EXEC) && dirty)
vma                50 arch/openrisc/mm/fault.c 	struct vm_area_struct *vma;
vma               108 arch/openrisc/mm/fault.c 	vma = find_vma(mm, address);
vma               110 arch/openrisc/mm/fault.c 	if (!vma)
vma               113 arch/openrisc/mm/fault.c 	if (vma->vm_start <= address)
vma               116 arch/openrisc/mm/fault.c 	if (!(vma->vm_flags & VM_GROWSDOWN))
vma               129 arch/openrisc/mm/fault.c 	if (expand_stack(vma, address))
vma               143 arch/openrisc/mm/fault.c 		if (!(vma->vm_flags & VM_WRITE))
vma               148 arch/openrisc/mm/fault.c 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
vma               153 arch/openrisc/mm/fault.c 	if ((vector == 0x400) && !(vma->vm_page_prot.pgprot & _PAGE_EXEC))
vma               162 arch/openrisc/mm/fault.c 	fault = handle_mm_fault(vma, address, flags);
vma                84 arch/openrisc/mm/tlb.c void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
vma                97 arch/openrisc/mm/tlb.c void local_flush_tlb_range(struct vm_area_struct *vma,
vma                61 arch/parisc/include/asm/cacheflush.h #define flush_icache_page(vma,page)	do { 		\
vma                71 arch/parisc/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
vma                73 arch/parisc/include/asm/cacheflush.h 	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
vma                78 arch/parisc/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vma                80 arch/parisc/include/asm/cacheflush.h 	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
vma                84 arch/parisc/include/asm/cacheflush.h void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
vma                85 arch/parisc/include/asm/cacheflush.h void flush_cache_range(struct vm_area_struct *vma,
vma                93 arch/parisc/include/asm/cacheflush.h flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
vma                96 arch/parisc/include/asm/cacheflush.h 		flush_tlb_page(vma, vmaddr);
vma                 9 arch/parisc/include/asm/fb.h static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
vma                12 arch/parisc/include/asm/fb.h 	pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
vma                37 arch/parisc/include/asm/hugetlb.h static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
vma                47 arch/parisc/include/asm/hugetlb.h int huge_ptep_set_access_flags(struct vm_area_struct *vma,
vma               498 arch/parisc/include/asm/pgtable.h static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
vma               506 arch/parisc/include/asm/pgtable.h 	spin_lock_irqsave(pgd_spinlock(vma->vm_mm->pgd), flags);
vma               509 arch/parisc/include/asm/pgtable.h 		spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
vma               513 arch/parisc/include/asm/pgtable.h 	purge_tlb_entries(vma->vm_mm, addr);
vma               514 arch/parisc/include/asm/pgtable.h 	spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
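parisc's ptep_test_and_clear_young() is a locked read-modify-write: bail out if
the PTE is not young, otherwise clear the accessed bit and purge the stale TLB
entry (parisc does this under its pgd spinlock, per the fragments above). A
condensation of the same shape, using generic helpers in place of parisc's
pgd_spinlock()/purge_tlb_entries():

    static int demo_test_and_clear_young(struct vm_area_struct *vma,
                                         unsigned long addr, pte_t *ptep)
    {
            pte_t pte = *ptep;

            if (!pte_young(pte))
                    return 0;               /* fast path: nothing to clear */

            set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
            flush_tlb_page(vma, addr);      /* drop the cached translation */
            return 1;
    }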
vma                19 arch/parisc/include/asm/tlbflush.h #define flush_tlb_range(vma, start, end) \
vma                20 arch/parisc/include/asm/tlbflush.h 	__flush_tlb_range((vma)->vm_mm->context, start, end)
vma                64 arch/parisc/include/asm/tlbflush.h static inline void flush_tlb_page(struct vm_area_struct *vma,
vma                67 arch/parisc/include/asm/tlbflush.h 	purge_tlb_entries(vma->vm_mm, addr);
vma                87 arch/parisc/kernel/cache.c update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
vma               304 arch/parisc/kernel/cache.c __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
vma               309 arch/parisc/kernel/cache.c 	if (vma->vm_flags & VM_EXEC)
vma               315 arch/parisc/kernel/cache.c __purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
vma               320 arch/parisc/kernel/cache.c 	if (vma->vm_flags & VM_EXEC)
vma               524 arch/parisc/kernel/cache.c 	struct vm_area_struct *vma;
vma               527 arch/parisc/kernel/cache.c 	for (vma = mm->mmap; vma; vma = vma->vm_next)
vma               528 arch/parisc/kernel/cache.c 		usize += vma->vm_end - vma->vm_start;
vma               549 arch/parisc/kernel/cache.c 	struct vm_area_struct *vma;
vma               563 arch/parisc/kernel/cache.c 		for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma               564 arch/parisc/kernel/cache.c 			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
vma               565 arch/parisc/kernel/cache.c 			if (vma->vm_flags & VM_EXEC)
vma               566 arch/parisc/kernel/cache.c 				flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
vma               567 arch/parisc/kernel/cache.c 			flush_tlb_range(vma, vma->vm_start, vma->vm_end);
vma               573 arch/parisc/kernel/cache.c 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma               576 arch/parisc/kernel/cache.c 		for (addr = vma->vm_start; addr < vma->vm_end;
vma               586 arch/parisc/kernel/cache.c 				flush_tlb_page(vma, addr);
vma               587 arch/parisc/kernel/cache.c 				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
vma               589 arch/parisc/kernel/cache.c 				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
vma               595 arch/parisc/kernel/cache.c void flush_cache_range(struct vm_area_struct *vma,
vma               603 arch/parisc/kernel/cache.c 		if (vma->vm_mm->context)
vma               604 arch/parisc/kernel/cache.c 			flush_tlb_range(vma, start, end);
vma               609 arch/parisc/kernel/cache.c 	if (vma->vm_mm->context == mfsp(3)) {
vma               611 arch/parisc/kernel/cache.c 		if (vma->vm_flags & VM_EXEC)
vma               613 arch/parisc/kernel/cache.c 		flush_tlb_range(vma, start, end);
vma               617 arch/parisc/kernel/cache.c 	pgd = vma->vm_mm->pgd;
vma               618 arch/parisc/kernel/cache.c 	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
vma               625 arch/parisc/kernel/cache.c 			if (unlikely(vma->vm_mm->context)) {
vma               626 arch/parisc/kernel/cache.c 				flush_tlb_page(vma, addr);
vma               627 arch/parisc/kernel/cache.c 				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
vma               629 arch/parisc/kernel/cache.c 				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
vma               636 arch/parisc/kernel/cache.c flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
vma               639 arch/parisc/kernel/cache.c 		if (likely(vma->vm_mm->context)) {
vma               640 arch/parisc/kernel/cache.c 			flush_tlb_page(vma, vmaddr);
vma               641 arch/parisc/kernel/cache.c 			__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
vma               643 arch/parisc/kernel/cache.c 			__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
vma                86 arch/parisc/kernel/sys_parisc.c 	struct vm_area_struct *vma, *prev;
vma               113 arch/parisc/kernel/sys_parisc.c 		vma = find_vma_prev(mm, addr, &prev);
vma               115 arch/parisc/kernel/sys_parisc.c 		    (!vma || addr + len <= vm_start_gap(vma)) &&
vma               140 arch/parisc/kernel/sys_parisc.c 	struct vm_area_struct *vma, *prev;
vma               170 arch/parisc/kernel/sys_parisc.c 		vma = find_vma_prev(mm, addr, &prev);
vma               172 arch/parisc/kernel/sys_parisc.c 		    (!vma || addr + len <= vm_start_gap(vma)) &&
vma               718 arch/parisc/kernel/traps.c 			struct vm_area_struct *vma;
vma               721 arch/parisc/kernel/traps.c 			vma = find_vma(current->mm,regs->iaoq[0]);
vma               722 arch/parisc/kernel/traps.c 			if (vma && (regs->iaoq[0] >= vma->vm_start)
vma               723 arch/parisc/kernel/traps.c 				&& (vma->vm_flags & VM_EXEC)) {
vma               236 arch/parisc/mm/fault.c 		struct vm_area_struct *vma)
vma               250 arch/parisc/mm/fault.c 		vma ? ',':'\n');
vma               252 arch/parisc/mm/fault.c 	if (vma)
vma               254 arch/parisc/mm/fault.c 			vma->vm_start, vma->vm_end);
vma               262 arch/parisc/mm/fault.c 	struct vm_area_struct *vma, *prev_vma;
vma               286 arch/parisc/mm/fault.c 	vma = find_vma_prev(mm, address, &prev_vma);
vma               287 arch/parisc/mm/fault.c 	if (!vma || address < vma->vm_start)
vma               296 arch/parisc/mm/fault.c 	if ((vma->vm_flags & acc_type) != acc_type)
vma               305 arch/parisc/mm/fault.c 	fault = handle_mm_fault(vma, address, flags);
vma               346 arch/parisc/mm/fault.c 	vma = prev_vma;
vma               347 arch/parisc/mm/fault.c 	if (vma && (expand_stack(vma, address) == 0))
vma               362 arch/parisc/mm/fault.c 			if (!vma ||
vma               363 arch/parisc/mm/fault.c 			    address < vma->vm_start || address >= vma->vm_end) {
vma               370 arch/parisc/mm/fault.c 			if ((vma->vm_flags & acc_type) != acc_type) {
vma               411 arch/parisc/mm/fault.c 		show_signal_msg(regs, code, address, tsk, vma);
vma               175 arch/parisc/mm/hugetlbpage.c int huge_ptep_set_access_flags(struct vm_area_struct *vma,
vma               181 arch/parisc/mm/hugetlbpage.c 	struct mm_struct *mm = vma->vm_mm;
vma               331 arch/powerpc/include/asm/book3s/32/pgtable.h static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
vma               341 arch/powerpc/include/asm/book3s/32/pgtable.h 	flush_tlb_page(vma, address);
vma                10 arch/powerpc/include/asm/book3s/32/tlbflush.h extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
vma                11 arch/powerpc/include/asm/book3s/32/tlbflush.h extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
vma                12 arch/powerpc/include/asm/book3s/32/tlbflush.h extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma                15 arch/powerpc/include/asm/book3s/32/tlbflush.h static inline void local_flush_tlb_page(struct vm_area_struct *vma,
vma                18 arch/powerpc/include/asm/book3s/32/tlbflush.h 	flush_tlb_page(vma, vmaddr);
vma                69 arch/powerpc/include/asm/book3s/64/hash-4k.h #define remap_4k_pfn(vma, addr, pfn, prot)	\
vma                70 arch/powerpc/include/asm/book3s/64/hash-4k.h 	remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
vma               149 arch/powerpc/include/asm/book3s/64/hash-4k.h extern pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma,
vma               162 arch/powerpc/include/asm/book3s/64/hash-64k.h static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
vma               169 arch/powerpc/include/asm/book3s/64/hash-64k.h 	return remap_pfn_range(vma, addr, pfn, PAGE_SIZE,
vma               266 arch/powerpc/include/asm/book3s/64/hash-64k.h extern pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma,
vma                 9 arch/powerpc/include/asm/book3s/64/hugetlb.h void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
vma                10 arch/powerpc/include/asm/book3s/64/hugetlb.h void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
vma                16 arch/powerpc/include/asm/book3s/64/hugetlb.h extern void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
vma                57 arch/powerpc/include/asm/book3s/64/hugetlb.h extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
vma                61 arch/powerpc/include/asm/book3s/64/hugetlb.h extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
vma                87 arch/powerpc/include/asm/book3s/64/hugetlb.h static inline void flush_hugetlb_page(struct vm_area_struct *vma,
vma                91 arch/powerpc/include/asm/book3s/64/hugetlb.h 		return radix__flush_hugetlb_page(vma, vmaddr);
vma               107 arch/powerpc/include/asm/book3s/64/hugetlb.h void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
vma                67 arch/powerpc/include/asm/book3s/64/pgtable-64k.h static inline int remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
vma                72 arch/powerpc/include/asm/book3s/64/pgtable-64k.h 	return hash__remap_4k_pfn(vma, addr, pfn, prot);
vma               789 arch/powerpc/include/asm/book3s/64/pgtable.h static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
vma               795 arch/powerpc/include/asm/book3s/64/pgtable.h 		return radix__ptep_set_access_flags(vma, ptep, entry,
vma              1142 arch/powerpc/include/asm/book3s/64/pgtable.h extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
vma              1237 arch/powerpc/include/asm/book3s/64/pgtable.h extern int pmdp_set_access_flags(struct vm_area_struct *vma,
vma              1242 arch/powerpc/include/asm/book3s/64/pgtable.h extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
vma              1254 arch/powerpc/include/asm/book3s/64/pgtable.h static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
vma              1258 arch/powerpc/include/asm/book3s/64/pgtable.h 		return radix__pmdp_collapse_flush(vma, address, pmdp);
vma              1259 arch/powerpc/include/asm/book3s/64/pgtable.h 	return hash__pmdp_collapse_flush(vma, address, pmdp);
vma              1282 arch/powerpc/include/asm/book3s/64/pgtable.h extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
vma              1289 arch/powerpc/include/asm/book3s/64/pgtable.h 				  struct vm_area_struct *vma);
vma               124 arch/powerpc/include/asm/book3s/64/radix.h extern void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
vma               128 arch/powerpc/include/asm/book3s/64/radix.h extern void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
vma               250 arch/powerpc/include/asm/book3s/64/radix.h extern pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma,
vma                92 arch/powerpc/include/asm/book3s/64/tlbflush-hash.h static inline void hash__local_flush_tlb_page(struct vm_area_struct *vma,
vma                97 arch/powerpc/include/asm/book3s/64/tlbflush-hash.h static inline void hash__flush_tlb_page(struct vm_area_struct *vma,
vma               102 arch/powerpc/include/asm/book3s/64/tlbflush-hash.h static inline void hash__flush_tlb_range(struct vm_area_struct *vma,
vma                44 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h extern void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma,
vma                48 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h extern void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
vma                50 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h extern void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma                56 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
vma                63 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
vma                69 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h #define radix__flush_tlb_page(vma,addr)	radix__local_flush_tlb_page(vma,addr)
vma                50 arch/powerpc/include/asm/book3s/64/tlbflush.h static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
vma                54 arch/powerpc/include/asm/book3s/64/tlbflush.h 		return radix__flush_pmd_tlb_range(vma, start, end);
vma                55 arch/powerpc/include/asm/book3s/64/tlbflush.h 	return hash__flush_tlb_range(vma, start, end);
vma                59 arch/powerpc/include/asm/book3s/64/tlbflush.h static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
vma                64 arch/powerpc/include/asm/book3s/64/tlbflush.h 		return radix__flush_hugetlb_tlb_range(vma, start, end);
vma                65 arch/powerpc/include/asm/book3s/64/tlbflush.h 	return hash__flush_tlb_range(vma, start, end);
vma                68 arch/powerpc/include/asm/book3s/64/tlbflush.h static inline void flush_tlb_range(struct vm_area_struct *vma,
vma                72 arch/powerpc/include/asm/book3s/64/tlbflush.h 		return radix__flush_tlb_range(vma, start, end);
vma                73 arch/powerpc/include/asm/book3s/64/tlbflush.h 	return hash__flush_tlb_range(vma, start, end);
vma                91 arch/powerpc/include/asm/book3s/64/tlbflush.h static inline void local_flush_tlb_page(struct vm_area_struct *vma,
vma                95 arch/powerpc/include/asm/book3s/64/tlbflush.h 		return radix__local_flush_tlb_page(vma, vmaddr);
vma                96 arch/powerpc/include/asm/book3s/64/tlbflush.h 	return hash__local_flush_tlb_page(vma, vmaddr);
vma               121 arch/powerpc/include/asm/book3s/64/tlbflush.h static inline void flush_tlb_page(struct vm_area_struct *vma,
vma               125 arch/powerpc/include/asm/book3s/64/tlbflush.h 		return radix__flush_tlb_page(vma, vmaddr);
vma               126 arch/powerpc/include/asm/book3s/64/tlbflush.h 	return hash__flush_tlb_page(vma, vmaddr);
vma               137 arch/powerpc/include/asm/book3s/64/tlbflush.h #define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
vma               142 arch/powerpc/include/asm/book3s/64/tlbflush.h static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
vma               146 arch/powerpc/include/asm/book3s/64/tlbflush.h 	if (atomic_read(&vma->vm_mm->context.copros) > 0)
vma               147 arch/powerpc/include/asm/book3s/64/tlbflush.h 		flush_tlb_page(vma, address);
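Every book3s/64 tlbflush.h wrapper above dispatches on radix_enabled(), so
callers never care which MMU was selected at boot. Reassembled from the fragments
above, the flush_tlb_page() wrapper reads:

    static inline void flush_tlb_page(struct vm_area_struct *vma,
                                      unsigned long vmaddr)
    {
            if (radix_enabled())
                    return radix__flush_tlb_page(vma, vmaddr);
            return hash__flush_tlb_page(vma, vmaddr);
    }

The same two-way dispatch covers flush_tlb_range(), flush_pmd_tlb_range(),
local_flush_tlb_page() and the hugetlb variants listed above.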
vma                21 arch/powerpc/include/asm/book3s/pgtable.h extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
vma                38 arch/powerpc/include/asm/book3s/pgtable.h void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
vma                19 arch/powerpc/include/asm/cacheflush.h #define flush_cache_range(vma, start, end)	do { } while (0)
vma                20 arch/powerpc/include/asm/cacheflush.h #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
vma                21 arch/powerpc/include/asm/cacheflush.h #define flush_icache_page(vma, page)		do { } while (0)
vma                46 arch/powerpc/include/asm/cacheflush.h extern void flush_icache_user_range(struct vm_area_struct *vma,
vma               118 arch/powerpc/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
vma               121 arch/powerpc/include/asm/cacheflush.h 		flush_icache_user_range(vma, page, vaddr, len); \
vma               123 arch/powerpc/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vma                 9 arch/powerpc/include/asm/fb.h static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
vma                12 arch/powerpc/include/asm/fb.h 	vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT,
vma                13 arch/powerpc/include/asm/fb.h 						 vma->vm_end - vma->vm_start,
vma                14 arch/powerpc/include/asm/fb.h 						 vma->vm_page_prot);
vma                51 arch/powerpc/include/asm/hugetlb.h static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
vma                54 arch/powerpc/include/asm/hugetlb.h 	huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
vma                55 arch/powerpc/include/asm/hugetlb.h 	flush_hugetlb_page(vma, addr);
vma                59 arch/powerpc/include/asm/hugetlb.h int huge_ptep_set_access_flags(struct vm_area_struct *vma,
vma                70 arch/powerpc/include/asm/hugetlb.h static inline void flush_hugetlb_page(struct vm_area_struct *vma,
vma               242 arch/powerpc/include/asm/mmu_context.h 				     struct vm_area_struct *vma)
vma               247 arch/powerpc/include/asm/mmu_context.h bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
vma               251 arch/powerpc/include/asm/mmu_context.h static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
vma                27 arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h static inline void flush_hugetlb_page(struct vm_area_struct *vma,
vma                30 arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h 	flush_tlb_page(vma, vmaddr);
vma               315 arch/powerpc/include/asm/nohash/32/pgtable.h static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
vma               327 arch/powerpc/include/asm/nohash/32/pgtable.h 	flush_tlb_page(vma, address);
vma                90 arch/powerpc/include/asm/nohash/64/pgtable-4k.h #define remap_4k_pfn(vma, addr, pfn, prot)	\
vma                91 arch/powerpc/include/asm/nohash/64/pgtable-4k.h 	remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
vma               305 arch/powerpc/include/asm/nohash/64/pgtable.h static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
vma               329 arch/powerpc/include/asm/nohash/64/pgtable.h 	flush_tlb_page(vma, address);
vma                29 arch/powerpc/include/asm/nohash/hugetlb-book3e.h void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
vma               230 arch/powerpc/include/asm/nohash/pgtable.h extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
vma               303 arch/powerpc/include/asm/nohash/pgtable.h void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
vma               306 arch/powerpc/include/asm/nohash/pgtable.h void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
vma                32 arch/powerpc/include/asm/nohash/tlbflush.h extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma                37 arch/powerpc/include/asm/nohash/tlbflush.h extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
vma                44 arch/powerpc/include/asm/nohash/tlbflush.h extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
vma                49 arch/powerpc/include/asm/nohash/tlbflush.h #define flush_tlb_page(vma,addr)	local_flush_tlb_page(vma,addr)
vma                85 arch/powerpc/include/asm/pci.h 				      struct vm_area_struct *vma,
vma                45 arch/powerpc/include/asm/pkeys.h static inline int vma_pkey(struct vm_area_struct *vma)
vma                49 arch/powerpc/include/asm/pkeys.h 	return (vma->vm_flags & ARCH_VM_PKEY_FLAGS) >> VM_PKEY_SHIFT;
vma               159 arch/powerpc/include/asm/pkeys.h extern int __arch_override_mprotect_pkey(struct vm_area_struct *vma,
vma               161 arch/powerpc/include/asm/pkeys.h static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma,
vma               174 arch/powerpc/include/asm/pkeys.h 	return __arch_override_mprotect_pkey(vma, prot, pkey);
vma                23 arch/powerpc/include/asm/tlb.h #define tlb_start_vma(tlb, vma)	do { } while (0)
vma                24 arch/powerpc/include/asm/tlb.h #define tlb_end_vma(tlb, vma)	do { } while (0)
vma               417 arch/powerpc/kernel/pci-common.c int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
vma               428 arch/powerpc/kernel/pci-common.c 	vma->vm_pgoff += (ioaddr + hose->io_base_phys) >> PAGE_SHIFT;
vma               568 arch/powerpc/kernel/pci-common.c 			       struct vm_area_struct *vma,
vma               573 arch/powerpc/kernel/pci-common.c 		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
vma               574 arch/powerpc/kernel/pci-common.c 	resource_size_t size = vma->vm_end - vma->vm_start;
vma               595 arch/powerpc/kernel/pci-common.c 			if (vma->vm_flags & VM_SHARED)
vma               596 arch/powerpc/kernel/pci-common.c 				return shmem_zero_setup(vma);
vma               612 arch/powerpc/kernel/pci-common.c 	vma->vm_pgoff = offset >> PAGE_SHIFT;
vma               613 arch/powerpc/kernel/pci-common.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               614 arch/powerpc/kernel/pci-common.c 	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma               615 arch/powerpc/kernel/pci-common.c 			       vma->vm_end - vma->vm_start,
vma               616 arch/powerpc/kernel/pci-common.c 			       vma->vm_page_prot);
vma                31 arch/powerpc/kernel/proc_powerpc.c static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
vma                33 arch/powerpc/kernel/proc_powerpc.c 	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE)
vma                36 arch/powerpc/kernel/proc_powerpc.c 	remap_pfn_range(vma, vma->vm_start,
vma                38 arch/powerpc/kernel/proc_powerpc.c 			PAGE_SIZE, vma->vm_page_prot);
vma               222 arch/powerpc/kernel/vdso.c const char *arch_vma_name(struct vm_area_struct *vma)
vma               224 arch/powerpc/kernel/vdso.c 	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
vma               508 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct vm_area_struct *vma;
vma               596 arch/powerpc/kvm/book3s_64_mmu_hv.c 		vma = find_vma(current->mm, hva);
vma               597 arch/powerpc/kvm/book3s_64_mmu_hv.c 		if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
vma               598 arch/powerpc/kvm/book3s_64_mmu_hv.c 		    (vma->vm_flags & VM_PFNMAP)) {
vma               599 arch/powerpc/kvm/book3s_64_mmu_hv.c 			pfn = vma->vm_pgoff +
vma               600 arch/powerpc/kvm/book3s_64_mmu_hv.c 				((hva - vma->vm_start) >> PAGE_SHIFT);
vma               602 arch/powerpc/kvm/book3s_64_mmu_hv.c 			is_ci = pte_ci(__pte((pgprot_val(vma->vm_page_prot))));
vma               603 arch/powerpc/kvm/book3s_64_mmu_hv.c 			write_ok = vma->vm_flags & VM_WRITE;
vma               213 arch/powerpc/kvm/book3s_64_vio.c 	struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
vma               232 arch/powerpc/kvm/book3s_64_vio.c static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
vma               234 arch/powerpc/kvm/book3s_64_vio.c 	vma->vm_ops = &kvm_spapr_tce_vm_ops;
vma              4582 arch/powerpc/kvm/book3s_hv.c 	struct vm_area_struct *vma;
vma              4619 arch/powerpc/kvm/book3s_hv.c 	vma = find_vma(current->mm, hva);
vma              4620 arch/powerpc/kvm/book3s_hv.c 	if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
vma              4623 arch/powerpc/kvm/book3s_hv.c 	psize = vma_kernel_pagesize(vma);
vma               229 arch/powerpc/kvm/book3s_xive_native.c 	struct vm_area_struct *vma = vmf->vma;
vma               230 arch/powerpc/kvm/book3s_xive_native.c 	struct kvm_device *dev = vma->vm_file->private_data;
vma               245 arch/powerpc/kvm/book3s_xive_native.c 	page_offset = vmf->pgoff - vma->vm_pgoff;
vma               272 arch/powerpc/kvm/book3s_xive_native.c 	vmf_insert_pfn(vma, vmf->address, page >> PAGE_SHIFT);
vma               282 arch/powerpc/kvm/book3s_xive_native.c 	struct vm_area_struct *vma = vmf->vma;
vma               284 arch/powerpc/kvm/book3s_xive_native.c 	switch (vmf->pgoff - vma->vm_pgoff) {
vma               289 arch/powerpc/kvm/book3s_xive_native.c 		vmf_insert_pfn(vma, vmf->address, xive_tima_os >> PAGE_SHIFT);
vma               302 arch/powerpc/kvm/book3s_xive_native.c 				   struct vm_area_struct *vma)
vma               307 arch/powerpc/kvm/book3s_xive_native.c 	if (vma->vm_pgoff == KVM_XIVE_TIMA_PAGE_OFFSET) {
vma               308 arch/powerpc/kvm/book3s_xive_native.c 		if (vma_pages(vma) > 4)
vma               310 arch/powerpc/kvm/book3s_xive_native.c 		vma->vm_ops = &xive_native_tima_vmops;
vma               311 arch/powerpc/kvm/book3s_xive_native.c 	} else if (vma->vm_pgoff == KVM_XIVE_ESB_PAGE_OFFSET) {
vma               312 arch/powerpc/kvm/book3s_xive_native.c 		if (vma_pages(vma) > KVMPPC_XIVE_NR_IRQS * 2)
vma               314 arch/powerpc/kvm/book3s_xive_native.c 		vma->vm_ops = &xive_native_esb_vmops;
vma               319 arch/powerpc/kvm/book3s_xive_native.c 	vma->vm_flags |= VM_IO | VM_PFNMAP;
vma               320 arch/powerpc/kvm/book3s_xive_native.c 	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
vma               327 arch/powerpc/kvm/book3s_xive_native.c 	xive->mapping = vma->vm_file->f_mapping;
vma               357 arch/powerpc/kvm/e500_mmu_host.c 		struct vm_area_struct *vma;
vma               360 arch/powerpc/kvm/e500_mmu_host.c 		vma = find_vma(current->mm, hva);
vma               361 arch/powerpc/kvm/e500_mmu_host.c 		if (vma && hva >= vma->vm_start &&
vma               362 arch/powerpc/kvm/e500_mmu_host.c 		    (vma->vm_flags & VM_PFNMAP)) {
vma               375 arch/powerpc/kvm/e500_mmu_host.c 			start = vma->vm_pgoff;
vma               377 arch/powerpc/kvm/e500_mmu_host.c 			      vma_pages(vma);
vma               379 arch/powerpc/kvm/e500_mmu_host.c 			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);
vma               424 arch/powerpc/kvm/e500_mmu_host.c 		} else if (vma && hva >= vma->vm_start &&
vma               425 arch/powerpc/kvm/e500_mmu_host.c 			   (vma->vm_flags & VM_HUGETLB)) {
vma               426 arch/powerpc/kvm/e500_mmu_host.c 			unsigned long psize = vma_kernel_pagesize(vma);
vma               319 arch/powerpc/mm/book3s32/mmu.c void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
vma               341 arch/powerpc/mm/book3s32/mmu.c 	hash_preload(vma->vm_mm, address);
vma               138 arch/powerpc/mm/book3s32/tlb.c void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
vma               147 arch/powerpc/mm/book3s32/tlb.c 	mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
vma               159 arch/powerpc/mm/book3s32/tlb.c void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma               162 arch/powerpc/mm/book3s32/tlb.c 	flush_range(vma->vm_mm, start, end);
vma               132 arch/powerpc/mm/book3s64/hash_hugetlbpage.c pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
vma               141 arch/powerpc/mm/book3s64/hash_hugetlbpage.c 	pte_val = pte_update(vma->vm_mm, addr, ptep,
vma               147 arch/powerpc/mm/book3s64/hash_hugetlbpage.c void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
vma               152 arch/powerpc/mm/book3s64/hash_hugetlbpage.c 		return radix__huge_ptep_modify_prot_commit(vma, addr, ptep,
vma               154 arch/powerpc/mm/book3s64/hash_hugetlbpage.c 	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
vma               222 arch/powerpc/mm/book3s64/hash_pgtable.c pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
vma               248 arch/powerpc/mm/book3s64/hash_pgtable.c 	serialize_against_pte_lookup(vma->vm_mm);
vma               258 arch/powerpc/mm/book3s64/hash_pgtable.c 	flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
vma              1610 arch/powerpc/mm/book3s64/hash_utils.c void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
vma              1650 arch/powerpc/mm/book3s64/hash_utils.c 	hash_preload(vma->vm_mm, address, is_exec, trap);
vma                35 arch/powerpc/mm/book3s64/pgtable.c int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
vma                41 arch/powerpc/mm/book3s64/pgtable.c 	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
vma                49 arch/powerpc/mm/book3s64/pgtable.c 		__ptep_set_access_flags(vma, pmdp_ptep(pmdp),
vma                55 arch/powerpc/mm/book3s64/pgtable.c int pmdp_test_and_clear_young(struct vm_area_struct *vma,
vma                58 arch/powerpc/mm/book3s64/pgtable.c 	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
vma               105 arch/powerpc/mm/book3s64/pgtable.c pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
vma               110 arch/powerpc/mm/book3s64/pgtable.c 	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
vma               111 arch/powerpc/mm/book3s64/pgtable.c 	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
vma               119 arch/powerpc/mm/book3s64/pgtable.c 	serialize_against_pte_lookup(vma->vm_mm);
vma               156 arch/powerpc/mm/book3s64/pgtable.c void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
vma               420 arch/powerpc/mm/book3s64/pgtable.c pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
vma               430 arch/powerpc/mm/book3s64/pgtable.c 	pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);
vma               436 arch/powerpc/mm/book3s64/pgtable.c void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
vma               440 arch/powerpc/mm/book3s64/pgtable.c 		return radix__ptep_modify_prot_commit(vma, addr,
vma               442 arch/powerpc/mm/book3s64/pgtable.c 	set_pte_at(vma->vm_mm, addr, ptep, pte);
vma               459 arch/powerpc/mm/book3s64/pgtable.c 			   struct vm_area_struct *vma)
vma               462 arch/powerpc/mm/book3s64/pgtable.c 		return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
vma               325 arch/powerpc/mm/book3s64/pkeys.c static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma)
vma               328 arch/powerpc/mm/book3s64/pkeys.c 	if ((vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) != VM_EXEC)
vma               331 arch/powerpc/mm/book3s64/pkeys.c 	return (vma_pkey(vma) == vma->vm_mm->context.execute_only_pkey);
vma               337 arch/powerpc/mm/book3s64/pkeys.c int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot,
vma               344 arch/powerpc/mm/book3s64/pkeys.c 	if (vma_is_pkey_exec_only(vma) && (prot != PROT_EXEC))
vma               352 arch/powerpc/mm/book3s64/pkeys.c 		pkey = execute_only_pkey(vma->vm_mm);
vma               358 arch/powerpc/mm/book3s64/pkeys.c 	return vma_pkey(vma);
vma               394 arch/powerpc/mm/book3s64/pkeys.c static inline bool vma_is_foreign(struct vm_area_struct *vma)
vma               400 arch/powerpc/mm/book3s64/pkeys.c 	if (current->mm != vma->vm_mm)
vma               406 arch/powerpc/mm/book3s64/pkeys.c bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
vma               414 arch/powerpc/mm/book3s64/pkeys.c 	if (foreign || vma_is_foreign(vma))
vma               417 arch/powerpc/mm/book3s64/pkeys.c 	return pkey_access_permitted(vma_pkey(vma), write, execute);
vma                12 arch/powerpc/mm/book3s64/radix_hugetlbpage.c void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
vma                15 arch/powerpc/mm/book3s64/radix_hugetlbpage.c 	struct hstate *hstate = hstate_file(vma->vm_file);
vma                18 arch/powerpc/mm/book3s64/radix_hugetlbpage.c 	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
vma                21 arch/powerpc/mm/book3s64/radix_hugetlbpage.c void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
vma                24 arch/powerpc/mm/book3s64/radix_hugetlbpage.c 	struct hstate *hstate = hstate_file(vma->vm_file);
vma                27 arch/powerpc/mm/book3s64/radix_hugetlbpage.c 	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
vma                30 arch/powerpc/mm/book3s64/radix_hugetlbpage.c void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma                34 arch/powerpc/mm/book3s64/radix_hugetlbpage.c 	struct hstate *hstate = hstate_file(vma->vm_file);
vma                37 arch/powerpc/mm/book3s64/radix_hugetlbpage.c 	radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
vma                51 arch/powerpc/mm/book3s64/radix_hugetlbpage.c 	struct vm_area_struct *vma;
vma                76 arch/powerpc/mm/book3s64/radix_hugetlbpage.c 		vma = find_vma(mm, addr);
vma                78 arch/powerpc/mm/book3s64/radix_hugetlbpage.c 		    (!vma || addr + len <= vm_start_gap(vma)))
vma                95 arch/powerpc/mm/book3s64/radix_hugetlbpage.c void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
vma                99 arch/powerpc/mm/book3s64/radix_hugetlbpage.c 	struct mm_struct *mm = vma->vm_mm;
vma               107 arch/powerpc/mm/book3s64/radix_hugetlbpage.c 		radix__flush_hugetlb_page(vma, addr);
vma               109 arch/powerpc/mm/book3s64/radix_hugetlbpage.c 	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
vma               940 arch/powerpc/mm/book3s64/radix_pgtable.c pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
vma               956 arch/powerpc/mm/book3s64/radix_pgtable.c 	serialize_against_pte_lookup(vma->vm_mm);
vma               958 arch/powerpc/mm/book3s64/radix_pgtable.c 	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
vma              1032 arch/powerpc/mm/book3s64/radix_pgtable.c void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
vma              1035 arch/powerpc/mm/book3s64/radix_pgtable.c 	struct mm_struct *mm = vma->vm_mm;
vma              1066 arch/powerpc/mm/book3s64/radix_pgtable.c void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
vma              1070 arch/powerpc/mm/book3s64/radix_pgtable.c 	struct mm_struct *mm = vma->vm_mm;
vma              1079 arch/powerpc/mm/book3s64/radix_pgtable.c 		radix__flush_tlb_page(vma, addr);
vma               604 arch/powerpc/mm/book3s64/radix_tlb.c void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
vma               608 arch/powerpc/mm/book3s64/radix_tlb.c 	if (is_vm_hugetlb_page(vma))
vma               609 arch/powerpc/mm/book3s64/radix_tlb.c 		return radix__local_flush_hugetlb_page(vma, vmaddr);
vma               611 arch/powerpc/mm/book3s64/radix_tlb.c 	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
vma               774 arch/powerpc/mm/book3s64/radix_tlb.c void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
vma               777 arch/powerpc/mm/book3s64/radix_tlb.c 	if (is_vm_hugetlb_page(vma))
vma               778 arch/powerpc/mm/book3s64/radix_tlb.c 		return radix__flush_hugetlb_page(vma, vmaddr);
vma               780 arch/powerpc/mm/book3s64/radix_tlb.c 	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
vma               939 arch/powerpc/mm/book3s64/radix_tlb.c void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma               944 arch/powerpc/mm/book3s64/radix_tlb.c 	if (is_vm_hugetlb_page(vma))
vma               945 arch/powerpc/mm/book3s64/radix_tlb.c 		return radix__flush_hugetlb_tlb_range(vma, start, end);
vma               948 arch/powerpc/mm/book3s64/radix_tlb.c 	__radix__flush_tlb_range(vma->vm_mm, start, end, false);
vma              1185 arch/powerpc/mm/book3s64/radix_tlb.c void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
vma              1188 arch/powerpc/mm/book3s64/radix_tlb.c 	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
vma               137 arch/powerpc/mm/book3s64/subpage_prot.c 	struct vm_area_struct *vma = walk->vma;
vma               138 arch/powerpc/mm/book3s64/subpage_prot.c 	split_huge_pmd(vma, pmd, addr);
vma               149 arch/powerpc/mm/book3s64/subpage_prot.c 	struct vm_area_struct *vma;
vma               155 arch/powerpc/mm/book3s64/subpage_prot.c 	vma = find_vma(mm, addr);
vma               159 arch/powerpc/mm/book3s64/subpage_prot.c 	if (vma && ((addr + len) <= vma->vm_start))
vma               162 arch/powerpc/mm/book3s64/subpage_prot.c 	while (vma) {
vma               163 arch/powerpc/mm/book3s64/subpage_prot.c 		if (vma->vm_start >= (addr + len))
vma               165 arch/powerpc/mm/book3s64/subpage_prot.c 		vma->vm_flags |= VM_NOHUGEPAGE;
vma               166 arch/powerpc/mm/book3s64/subpage_prot.c 		walk_page_vma(vma, &subpage_walk_ops, NULL);
vma               167 arch/powerpc/mm/book3s64/subpage_prot.c 		vma = vma->vm_next;
vma                26 arch/powerpc/mm/copro_fault.c 	struct vm_area_struct *vma;
vma                38 arch/powerpc/mm/copro_fault.c 	vma = find_vma(mm, ea);
vma                39 arch/powerpc/mm/copro_fault.c 	if (!vma)
vma                42 arch/powerpc/mm/copro_fault.c 	if (ea < vma->vm_start) {
vma                43 arch/powerpc/mm/copro_fault.c 		if (!(vma->vm_flags & VM_GROWSDOWN))
vma                45 arch/powerpc/mm/copro_fault.c 		if (expand_stack(vma, ea))
vma                51 arch/powerpc/mm/copro_fault.c 		if (!(vma->vm_flags & VM_WRITE))
vma                54 arch/powerpc/mm/copro_fault.c 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
vma                67 arch/powerpc/mm/copro_fault.c 	*flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
vma               245 arch/powerpc/mm/fault.c 				struct vm_area_struct *vma, unsigned int flags,
vma               257 arch/powerpc/mm/fault.c 	if (address + 0x100000 < vma->vm_end) {
vma               297 arch/powerpc/mm/fault.c 			 struct vm_area_struct *vma)
vma               310 arch/powerpc/mm/fault.c 		return !(vma->vm_flags & VM_EXEC) &&
vma               312 arch/powerpc/mm/fault.c 			 !(vma->vm_flags & (VM_READ | VM_WRITE)));
vma               316 arch/powerpc/mm/fault.c 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
vma               321 arch/powerpc/mm/fault.c 	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
vma               439 arch/powerpc/mm/fault.c 	struct vm_area_struct * vma;
vma               536 arch/powerpc/mm/fault.c 	vma = find_vma(mm, address);
vma               537 arch/powerpc/mm/fault.c 	if (unlikely(!vma))
vma               539 arch/powerpc/mm/fault.c 	if (likely(vma->vm_start <= address))
vma               541 arch/powerpc/mm/fault.c 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
vma               545 arch/powerpc/mm/fault.c 	if (unlikely(bad_stack_expansion(regs, address, vma, flags,
vma               558 arch/powerpc/mm/fault.c 	if (unlikely(expand_stack(vma, address)))
vma               562 arch/powerpc/mm/fault.c 	if (unlikely(access_error(is_write, is_exec, vma)))
vma               570 arch/powerpc/mm/fault.c 	fault = handle_mm_fault(vma, address, flags);
vma               578 arch/powerpc/mm/fault.c 		!arch_vma_access_permitted(vma, is_write, is_exec, 0)) {
vma               580 arch/powerpc/mm/fault.c 		int pkey = vma_pkey(vma);
vma               496 arch/powerpc/mm/hugetlbpage.c struct page *follow_huge_pd(struct vm_area_struct *vma,
vma               505 arch/powerpc/mm/hugetlbpage.c 	struct mm_struct *mm = vma->vm_mm;
vma               550 arch/powerpc/mm/hugetlbpage.c unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
vma               554 arch/powerpc/mm/hugetlbpage.c 		unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
vma               558 arch/powerpc/mm/hugetlbpage.c 	return vma_kernel_pagesize(vma);
vma               568 arch/powerpc/mm/mem.c 	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
vma               575 arch/powerpc/mm/mem.c void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
vma                95 arch/powerpc/mm/mmap.c 	struct vm_area_struct *vma;
vma               115 arch/powerpc/mm/mmap.c 		vma = find_vma(mm, addr);
vma               117 arch/powerpc/mm/mmap.c 		    (!vma || addr + len <= vm_start_gap(vma)))
vma               137 arch/powerpc/mm/mmap.c 	struct vm_area_struct *vma;
vma               159 arch/powerpc/mm/mmap.c 		vma = find_vma(mm, addr);
vma               161 arch/powerpc/mm/mmap.c 		    (!vma || addr + len <= vm_start_gap(vma)))
vma               126 arch/powerpc/mm/nohash/book3e_hugetlbpage.c book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
vma               138 arch/powerpc/mm/nohash/book3e_hugetlbpage.c 	mm = vma->vm_mm;
vma               140 arch/powerpc/mm/nohash/book3e_hugetlbpage.c 	psize = vma_mmu_pagesize(vma);
vma               192 arch/powerpc/mm/nohash/book3e_hugetlbpage.c void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
vma               194 arch/powerpc/mm/nohash/book3e_hugetlbpage.c 	if (is_vm_hugetlb_page(vma))
vma               195 arch/powerpc/mm/nohash/book3e_hugetlbpage.c 		book3e_hugetlb_preload(vma, address, *ptep);
vma               198 arch/powerpc/mm/nohash/book3e_hugetlbpage.c void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
vma               200 arch/powerpc/mm/nohash/book3e_hugetlbpage.c 	struct hstate *hstate = hstate_file(vma->vm_file);
vma               203 arch/powerpc/mm/nohash/book3e_hugetlbpage.c 	__flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0);
vma               218 arch/powerpc/mm/nohash/tlb.c void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
vma               220 arch/powerpc/mm/nohash/tlb.c 	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
vma               335 arch/powerpc/mm/nohash/tlb.c void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
vma               338 arch/powerpc/mm/nohash/tlb.c 	if (vma && is_vm_hugetlb_page(vma))
vma               339 arch/powerpc/mm/nohash/tlb.c 		flush_hugetlb_page(vma, vmaddr);
vma               342 arch/powerpc/mm/nohash/tlb.c 	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
vma               382 arch/powerpc/mm/nohash/tlb.c void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma               387 arch/powerpc/mm/nohash/tlb.c 		flush_tlb_page(vma, start);
vma               389 arch/powerpc/mm/nohash/tlb.c 		flush_tlb_mm(vma->vm_mm);
vma               134 arch/powerpc/mm/pgtable.c static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
vma               155 arch/powerpc/mm/pgtable.c 	if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
vma               208 arch/powerpc/mm/pgtable.c int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
vma               212 arch/powerpc/mm/pgtable.c 	entry = set_access_flags_filter(entry, vma, dirty);
vma               215 arch/powerpc/mm/pgtable.c 		assert_pte_locked(vma->vm_mm, address);
vma               216 arch/powerpc/mm/pgtable.c 		__ptep_set_access_flags(vma, ptep, entry,
vma               223 arch/powerpc/mm/pgtable.c int huge_ptep_set_access_flags(struct vm_area_struct *vma,
vma               233 arch/powerpc/mm/pgtable.c 	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
vma               238 arch/powerpc/mm/pgtable.c 	pte = set_access_flags_filter(pte, vma, dirty);
vma               243 arch/powerpc/mm/pgtable.c 		struct hstate *h = hstate_vma(vma);
vma               247 arch/powerpc/mm/pgtable.c 		assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep));
vma               257 arch/powerpc/mm/pgtable.c 		__ptep_set_access_flags(vma, ptep, pte, addr, psize);
vma                89 arch/powerpc/mm/slice.c 	struct vm_area_struct *vma;
vma                93 arch/powerpc/mm/slice.c 	vma = find_vma(mm, addr);
vma                94 arch/powerpc/mm/slice.c 	return (!vma || (addr + len) <= vm_start_gap(vma));
vma                34 arch/powerpc/oprofile/cell/pr_util.h 	unsigned int vma;	/* SPU virtual memory address from elf */
vma                42 arch/powerpc/oprofile/cell/pr_util.h 	unsigned int vma;	/* SPU virtual memory address from elf */
vma                84 arch/powerpc/oprofile/cell/pr_util.h 			    unsigned int vma, const struct spu *aSpu,
vma               321 arch/powerpc/oprofile/cell/spu_task_sync.c 	struct vm_area_struct *vma;
vma               336 arch/powerpc/oprofile/cell/spu_task_sync.c 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma               337 arch/powerpc/oprofile/cell/spu_task_sync.c 		if (vma->vm_start > spu_ref || vma->vm_end <= spu_ref)
vma               339 arch/powerpc/oprofile/cell/spu_task_sync.c 		my_offset = spu_ref - vma->vm_start;
vma               340 arch/powerpc/oprofile/cell/spu_task_sync.c 		if (!vma->vm_file)
vma               344 arch/powerpc/oprofile/cell/spu_task_sync.c 			 my_offset, spu_ref, vma->vm_file);
vma               349 arch/powerpc/oprofile/cell/spu_task_sync.c 	*spu_bin_dcookie = fast_get_dcookie(&vma->vm_file->f_path);
vma               350 arch/powerpc/oprofile/cell/spu_task_sync.c 	pr_debug("got dcookie for %pD\n", vma->vm_file);
vma                33 arch/powerpc/oprofile/cell/vma_map.c vma_map_lookup(struct vma_to_fileoffset_map *map, unsigned int vma,
vma                43 arch/powerpc/oprofile/cell/vma_map.c 	u32 offset = 0x10000000 + vma;
vma                47 arch/powerpc/oprofile/cell/vma_map.c 		if (vma < map->vma || vma >= map->vma + map->size)
vma                56 arch/powerpc/oprofile/cell/vma_map.c 		offset = vma - map->vma + map->offset;
vma                64 arch/powerpc/oprofile/cell/vma_map.c vma_map_add(struct vma_to_fileoffset_map *map, unsigned int vma,
vma                78 arch/powerpc/oprofile/cell/vma_map.c 	new->vma = vma;
vma               268 arch/powerpc/oprofile/cell/vma_map.c 		map = vma_map_add(map, ovly.vma, ovly.size, ovly.offset,
vma               225 arch/powerpc/platforms/cell/spufs/file.c 	struct vm_area_struct *vma = vmf->vma;
vma               226 arch/powerpc/platforms/cell/spufs/file.c 	struct spu_context *ctx	= vma->vm_file->private_data;
vma               241 arch/powerpc/platforms/cell/spufs/file.c 		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
vma               244 arch/powerpc/platforms/cell/spufs/file.c 		vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
vma               247 arch/powerpc/platforms/cell/spufs/file.c 	ret = vmf_insert_pfn(vma, vmf->address, pfn);
vma               254 arch/powerpc/platforms/cell/spufs/file.c static int spufs_mem_mmap_access(struct vm_area_struct *vma,
vma               258 arch/powerpc/platforms/cell/spufs/file.c 	struct spu_context *ctx = vma->vm_file->private_data;
vma               259 arch/powerpc/platforms/cell/spufs/file.c 	unsigned long offset = address - vma->vm_start;
vma               262 arch/powerpc/platforms/cell/spufs/file.c 	if (write && !(vma->vm_flags & VM_WRITE))
vma               266 arch/powerpc/platforms/cell/spufs/file.c 	if ((offset + len) > vma->vm_end)
vma               267 arch/powerpc/platforms/cell/spufs/file.c 		len = vma->vm_end - offset;
vma               282 arch/powerpc/platforms/cell/spufs/file.c static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
vma               284 arch/powerpc/platforms/cell/spufs/file.c 	if (!(vma->vm_flags & VM_SHARED))
vma               287 arch/powerpc/platforms/cell/spufs/file.c 	vma->vm_flags |= VM_IO | VM_PFNMAP;
vma               288 arch/powerpc/platforms/cell/spufs/file.c 	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
vma               290 arch/powerpc/platforms/cell/spufs/file.c 	vma->vm_ops = &spufs_mem_mmap_vmops;
vma               307 arch/powerpc/platforms/cell/spufs/file.c 	struct spu_context *ctx = vmf->vma->vm_file->private_data;
vma               346 arch/powerpc/platforms/cell/spufs/file.c 		ret = vmf_insert_pfn(vmf->vma, vmf->address,
vma               372 arch/powerpc/platforms/cell/spufs/file.c static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
vma               374 arch/powerpc/platforms/cell/spufs/file.c 	if (!(vma->vm_flags & VM_SHARED))
vma               377 arch/powerpc/platforms/cell/spufs/file.c 	vma->vm_flags |= VM_IO | VM_PFNMAP;
vma               378 arch/powerpc/platforms/cell/spufs/file.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               380 arch/powerpc/platforms/cell/spufs/file.c 	vma->vm_ops = &spufs_cntl_mmap_vmops;
vma              1052 arch/powerpc/platforms/cell/spufs/file.c static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
vma              1054 arch/powerpc/platforms/cell/spufs/file.c 	if (!(vma->vm_flags & VM_SHARED))
vma              1057 arch/powerpc/platforms/cell/spufs/file.c 	vma->vm_flags |= VM_IO | VM_PFNMAP;
vma              1058 arch/powerpc/platforms/cell/spufs/file.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma              1060 arch/powerpc/platforms/cell/spufs/file.c 	vma->vm_ops = &spufs_signal1_mmap_vmops;
vma              1190 arch/powerpc/platforms/cell/spufs/file.c static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
vma              1192 arch/powerpc/platforms/cell/spufs/file.c 	if (!(vma->vm_flags & VM_SHARED))
vma              1195 arch/powerpc/platforms/cell/spufs/file.c 	vma->vm_flags |= VM_IO | VM_PFNMAP;
vma              1196 arch/powerpc/platforms/cell/spufs/file.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma              1198 arch/powerpc/platforms/cell/spufs/file.c 	vma->vm_ops = &spufs_signal2_mmap_vmops;
vma              1313 arch/powerpc/platforms/cell/spufs/file.c static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
vma              1315 arch/powerpc/platforms/cell/spufs/file.c 	if (!(vma->vm_flags & VM_SHARED))
vma              1318 arch/powerpc/platforms/cell/spufs/file.c 	vma->vm_flags |= VM_IO | VM_PFNMAP;
vma              1319 arch/powerpc/platforms/cell/spufs/file.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma              1321 arch/powerpc/platforms/cell/spufs/file.c 	vma->vm_ops = &spufs_mss_mmap_vmops;
vma              1375 arch/powerpc/platforms/cell/spufs/file.c static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
vma              1377 arch/powerpc/platforms/cell/spufs/file.c 	if (!(vma->vm_flags & VM_SHARED))
vma              1380 arch/powerpc/platforms/cell/spufs/file.c 	vma->vm_flags |= VM_IO | VM_PFNMAP;
vma              1381 arch/powerpc/platforms/cell/spufs/file.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma              1383 arch/powerpc/platforms/cell/spufs/file.c 	vma->vm_ops = &spufs_psmap_mmap_vmops;
vma              1435 arch/powerpc/platforms/cell/spufs/file.c static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
vma              1437 arch/powerpc/platforms/cell/spufs/file.c 	if (!(vma->vm_flags & VM_SHARED))
vma              1440 arch/powerpc/platforms/cell/spufs/file.c 	vma->vm_flags |= VM_IO | VM_PFNMAP;
vma              1441 arch/powerpc/platforms/cell/spufs/file.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma              1443 arch/powerpc/platforms/cell/spufs/file.c 	vma->vm_ops = &spufs_mfc_mmap_vmops;
vma               104 arch/powerpc/platforms/powernv/opal-prd.c static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
vma               111 arch/powerpc/platforms/powernv/opal-prd.c 			vma->vm_start, vma->vm_end, vma->vm_pgoff,
vma               112 arch/powerpc/platforms/powernv/opal-prd.c 			vma->vm_flags);
vma               114 arch/powerpc/platforms/powernv/opal-prd.c 	addr = vma->vm_pgoff << PAGE_SHIFT;
vma               115 arch/powerpc/platforms/powernv/opal-prd.c 	size = vma->vm_end - vma->vm_start;
vma               121 arch/powerpc/platforms/powernv/opal-prd.c 	page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
vma               122 arch/powerpc/platforms/powernv/opal-prd.c 					 size, vma->vm_page_prot);
vma               124 arch/powerpc/platforms/powernv/opal-prd.c 	rc = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
vma                29 arch/riscv/include/asm/cacheflush.h static inline void flush_cache_range(struct vm_area_struct *vma,
vma                35 arch/riscv/include/asm/cacheflush.h static inline void flush_cache_page(struct vm_area_struct *vma,
vma                49 arch/riscv/include/asm/cacheflush.h static inline void flush_icache_page(struct vm_area_struct *vma,
vma                62 arch/riscv/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
vma                65 arch/riscv/include/asm/cacheflush.h 		flush_icache_user_range(vma, page, vaddr, len); \
vma                67 arch/riscv/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vma                88 arch/riscv/include/asm/cacheflush.h #define flush_icache_user_range(vma, pg, addr, len) flush_icache_all()
vma               302 arch/riscv/include/asm/pgtable.h static inline void update_mmu_cache(struct vm_area_struct *vma,
vma               349 arch/riscv/include/asm/pgtable.h static inline int ptep_set_access_flags(struct vm_area_struct *vma,
vma               354 arch/riscv/include/asm/pgtable.h 		set_pte_at(vma->vm_mm, address, ptep, entry);
vma               370 arch/riscv/include/asm/pgtable.h static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
vma               387 arch/riscv/include/asm/pgtable.h static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
vma               405 arch/riscv/include/asm/pgtable.h 	return ptep_test_and_clear_young(vma, address, ptep);
vma                27 arch/riscv/include/asm/tlbflush.h void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
vma                28 arch/riscv/include/asm/tlbflush.h void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma                32 arch/riscv/include/asm/tlbflush.h #define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
vma                34 arch/riscv/include/asm/tlbflush.h static inline void flush_tlb_range(struct vm_area_struct *vma,
vma                90 arch/riscv/kernel/vdso.c const char *arch_vma_name(struct vm_area_struct *vma)
vma                92 arch/riscv/kernel/vdso.c 	if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
vma                30 arch/riscv/mm/fault.c 	struct vm_area_struct *vma;
vma                73 arch/riscv/mm/fault.c 	vma = find_vma(mm, addr);
vma                74 arch/riscv/mm/fault.c 	if (unlikely(!vma))
vma                76 arch/riscv/mm/fault.c 	if (likely(vma->vm_start <= addr))
vma                78 arch/riscv/mm/fault.c 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
vma                80 arch/riscv/mm/fault.c 	if (unlikely(expand_stack(vma, addr)))
vma                92 arch/riscv/mm/fault.c 		if (!(vma->vm_flags & VM_EXEC))
vma                96 arch/riscv/mm/fault.c 		if (!(vma->vm_flags & VM_READ))
vma               100 arch/riscv/mm/fault.c 		if (!(vma->vm_flags & VM_WRITE))
vma               113 arch/riscv/mm/fault.c 	fault = handle_mm_fault(vma, addr, flags);
vma                26 arch/riscv/mm/tlbflush.c void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
vma                28 arch/riscv/mm/tlbflush.c 	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
vma                31 arch/riscv/mm/tlbflush.c void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma                34 arch/riscv/mm/tlbflush.c 	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
vma                59 arch/s390/include/asm/hugetlb.h static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
vma                62 arch/s390/include/asm/hugetlb.h 	huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
vma                65 arch/s390/include/asm/hugetlb.h static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
vma                71 arch/s390/include/asm/hugetlb.h 		huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
vma                72 arch/s390/include/asm/hugetlb.h 		set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
vma                71 arch/s390/include/asm/page.h #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
vma                72 arch/s390/include/asm/page.h 	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
vma                48 arch/s390/include/asm/pgtable.h #define update_mmu_cache(vma, address, ptep)     do { } while (0)
vma                49 arch/s390/include/asm/pgtable.h #define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)
vma              1060 arch/s390/include/asm/pgtable.h static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
vma              1065 arch/s390/include/asm/pgtable.h 	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
vma              1070 arch/s390/include/asm/pgtable.h static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
vma              1073 arch/s390/include/asm/pgtable.h 	return ptep_test_and_clear_young(vma, address, ptep);
vma              1089 arch/s390/include/asm/pgtable.h static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
vma              1092 arch/s390/include/asm/pgtable.h 	return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
vma              1126 arch/s390/include/asm/pgtable.h static inline int ptep_set_access_flags(struct vm_area_struct *vma,
vma              1132 arch/s390/include/asm/pgtable.h 	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
vma              1512 arch/s390/include/asm/pgtable.h static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
vma              1523 arch/s390/include/asm/pgtable.h 	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
vma              1528 arch/s390/include/asm/pgtable.h static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
vma              1533 arch/s390/include/asm/pgtable.h 	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
vma              1538 arch/s390/include/asm/pgtable.h static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
vma              1542 arch/s390/include/asm/pgtable.h 	return pmdp_test_and_clear_young(vma, addr, pmdp);
vma              1582 arch/s390/include/asm/pgtable.h static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
vma              1585 arch/s390/include/asm/pgtable.h 	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
vma              1589 arch/s390/include/asm/pgtable.h static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
vma              1594 arch/s390/include/asm/pgtable.h 	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
vma              1607 arch/s390/include/asm/pgtable.h static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
vma              1611 arch/s390/include/asm/pgtable.h 	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
vma                30 arch/s390/include/asm/tlb.h #define tlb_start_vma(tlb, vma)			do { } while (0)
vma                31 arch/s390/include/asm/tlb.h #define tlb_end_vma(tlb, vma)			do { } while (0)
vma               116 arch/s390/include/asm/tlbflush.h #define flush_tlb_page(vma, addr)		do { } while (0)
vma               123 arch/s390/include/asm/tlbflush.h static inline void flush_tlb_range(struct vm_area_struct *vma,
vma               126 arch/s390/include/asm/tlbflush.h 	__tlb_flush_mm_lazy(vma->vm_mm);
vma               239 arch/s390/kernel/crash_dump.c static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma,
vma               248 arch/s390/kernel/crash_dump.c 		rc = remap_pfn_range(vma, from,
vma               257 arch/s390/kernel/crash_dump.c 	return remap_pfn_range(vma, from, pfn, size, prot);
vma               266 arch/s390/kernel/crash_dump.c static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma,
vma               282 arch/s390/kernel/crash_dump.c 	return remap_pfn_range(vma, from, pfn, size, prot);
vma               288 arch/s390/kernel/crash_dump.c int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
vma               292 arch/s390/kernel/crash_dump.c 		return remap_oldmem_pfn_range_kdump(vma, from, pfn, size, prot);
vma               294 arch/s390/kernel/crash_dump.c 		return remap_oldmem_pfn_range_zfcpdump(vma, from, pfn, size,
vma                51 arch/s390/kernel/vdso.c 		      struct vm_area_struct *vma, struct vm_fault *vmf)
vma                59 arch/s390/kernel/vdso.c 	if (vma->vm_mm->context.compat_mm) {
vma                74 arch/s390/kernel/vdso.c 		       struct vm_area_struct *vma)
vma                80 arch/s390/kernel/vdso.c 	if (vma->vm_mm->context.compat_mm)
vma                84 arch/s390/kernel/vdso.c 	if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start)
vma                87 arch/s390/kernel/vdso.c 	if (WARN_ON_ONCE(current->mm != vma->vm_mm))
vma                90 arch/s390/kernel/vdso.c 	current->mm->context.vdso_base = vma->vm_start;
vma               204 arch/s390/kernel/vdso.c 	struct vm_area_struct *vma;
vma               248 arch/s390/kernel/vdso.c 	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
vma               252 arch/s390/kernel/vdso.c 	if (IS_ERR(vma)) {
vma               253 arch/s390/kernel/vdso.c 		rc = PTR_ERR(vma);
vma               390 arch/s390/mm/fault.c 	struct vm_area_struct *vma;
vma               456 arch/s390/mm/fault.c 	vma = find_vma(mm, address);
vma               457 arch/s390/mm/fault.c 	if (!vma)
vma               460 arch/s390/mm/fault.c 	if (unlikely(vma->vm_start > address)) {
vma               461 arch/s390/mm/fault.c 		if (!(vma->vm_flags & VM_GROWSDOWN))
vma               463 arch/s390/mm/fault.c 		if (expand_stack(vma, address))
vma               472 arch/s390/mm/fault.c 	if (unlikely(!(vma->vm_flags & access)))
vma               475 arch/s390/mm/fault.c 	if (is_vm_hugetlb_page(vma))
vma               482 arch/s390/mm/fault.c 	fault = handle_mm_fault(vma, address, flags);
vma               697 arch/s390/mm/gmap.c 	struct vm_area_struct *vma;
vma               710 arch/s390/mm/gmap.c 		vma = find_vma(gmap->mm, vmaddr);
vma               711 arch/s390/mm/gmap.c 		if (!vma)
vma               717 arch/s390/mm/gmap.c 		if (is_vm_hugetlb_page(vma))
vma               720 arch/s390/mm/gmap.c 		zap_page_range(vma, vmaddr, size);
vma              2491 arch/s390/mm/gmap.c 	struct vm_area_struct *vma;
vma              2494 arch/s390/mm/gmap.c 	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
vma              2495 arch/s390/mm/gmap.c 		for (addr = vma->vm_start;
vma              2496 arch/s390/mm/gmap.c 		     addr < vma->vm_end;
vma              2498 arch/s390/mm/gmap.c 			follow_page(vma, addr, FOLL_SPLIT);
vma              2499 arch/s390/mm/gmap.c 		vma->vm_flags &= ~VM_HUGEPAGE;
vma              2500 arch/s390/mm/gmap.c 		vma->vm_flags |= VM_NOHUGEPAGE;
vma              2601 arch/s390/mm/gmap.c 	struct vm_area_struct *vma;
vma              2609 arch/s390/mm/gmap.c 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma              2610 arch/s390/mm/gmap.c 		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
vma              2611 arch/s390/mm/gmap.c 				MADV_UNMERGEABLE, &vma->vm_flags)) {
vma               331 arch/s390/mm/hugetlbpage.c 	struct vm_area_struct *vma;
vma               347 arch/s390/mm/hugetlbpage.c 		vma = find_vma(mm, addr);
vma               349 arch/s390/mm/hugetlbpage.c 		    (!vma || addr + len <= vm_start_gap(vma)))
vma                80 arch/s390/mm/mmap.c 	struct vm_area_struct *vma;
vma                92 arch/s390/mm/mmap.c 		vma = find_vma(mm, addr);
vma                94 arch/s390/mm/mmap.c 		    (!vma || addr + len <= vm_start_gap(vma)))
vma               127 arch/s390/mm/mmap.c 	struct vm_area_struct *vma;
vma               143 arch/s390/mm/mmap.c 		vma = find_vma(mm, addr);
vma               145 arch/s390/mm/mmap.c 				(!vma || addr + len <= vm_start_gap(vma)))
vma               304 arch/s390/mm/pgtable.c pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
vma               310 arch/s390/mm/pgtable.c 	struct mm_struct *mm = vma->vm_mm;
vma               323 arch/s390/mm/pgtable.c void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
vma               327 arch/s390/mm/pgtable.c 	struct mm_struct *mm = vma->vm_mm;
vma               125 arch/s390/pci/pci_mmio.c 	struct vm_area_struct *vma;
vma               130 arch/s390/pci/pci_mmio.c 	vma = find_vma(current->mm, user_addr);
vma               131 arch/s390/pci/pci_mmio.c 	if (!vma)
vma               134 arch/s390/pci/pci_mmio.c 	if (!(vma->vm_flags & access))
vma               136 arch/s390/pci/pci_mmio.c 	ret = follow_pfn(vma, user_addr, pfn);
vma                42 arch/sh/include/asm/cacheflush.h extern void flush_cache_page(struct vm_area_struct *vma,
vma                44 arch/sh/include/asm/cacheflush.h extern void flush_cache_range(struct vm_area_struct *vma,
vma                49 arch/sh/include/asm/cacheflush.h extern void flush_icache_page(struct vm_area_struct *vma,
vma                54 arch/sh/include/asm/cacheflush.h 	struct vm_area_struct *vma;
vma                61 arch/sh/include/asm/cacheflush.h static inline void flush_anon_page(struct vm_area_struct *vma,
vma                82 arch/sh/include/asm/cacheflush.h extern void copy_to_user_page(struct vm_area_struct *vma,
vma                86 arch/sh/include/asm/cacheflush.h extern void copy_from_user_page(struct vm_area_struct *vma,
vma                 9 arch/sh/include/asm/fb.h static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
vma                12 arch/sh/include/asm/fb.h 	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma                30 arch/sh/include/asm/hugetlb.h static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
vma                69 arch/sh/include/asm/page.h 			       unsigned long vaddr, struct vm_area_struct *vma);
vma               129 arch/sh/include/asm/pgtable.h extern void __update_cache(struct vm_area_struct *vma,
vma               131 arch/sh/include/asm/pgtable.h extern void __update_tlb(struct vm_area_struct *vma,
vma               135 arch/sh/include/asm/pgtable.h update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
vma               138 arch/sh/include/asm/pgtable.h 	__update_cache(vma, address, pte);
vma               139 arch/sh/include/asm/pgtable.h 	__update_tlb(vma, address, pte);
vma                21 arch/sh/include/asm/tlb.h static inline void tlb_wire_entry(struct vm_area_struct *vma ,
vma                16 arch/sh/include/asm/tlbflush.h extern void local_flush_tlb_range(struct vm_area_struct *vma,
vma                19 arch/sh/include/asm/tlbflush.h extern void local_flush_tlb_page(struct vm_area_struct *vma,
vma                31 arch/sh/include/asm/tlbflush.h extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma                33 arch/sh/include/asm/tlbflush.h extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
vma                41 arch/sh/include/asm/tlbflush.h #define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
vma                44 arch/sh/include/asm/tlbflush.h #define flush_tlb_range(vma, start, end)	\
vma                45 arch/sh/include/asm/tlbflush.h 	local_flush_tlb_range(vma, start, end)
vma               104 arch/sh/kernel/cpu/sh4/sq.c 	struct vm_struct *vma;
vma               106 arch/sh/kernel/cpu/sh4/sq.c 	vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX);
vma               107 arch/sh/kernel/cpu/sh4/sq.c 	if (!vma)
vma               110 arch/sh/kernel/cpu/sh4/sq.c 	vma->phys_addr = map->addr;
vma               112 arch/sh/kernel/cpu/sh4/sq.c 	if (ioremap_page_range((unsigned long)vma->addr,
vma               113 arch/sh/kernel/cpu/sh4/sq.c 			       (unsigned long)vma->addr + map->size,
vma               114 arch/sh/kernel/cpu/sh4/sq.c 			       vma->phys_addr, prot)) {
vma               115 arch/sh/kernel/cpu/sh4/sq.c 		vunmap(vma->addr);
vma               229 arch/sh/kernel/cpu/sh4/sq.c 		struct vm_struct *vma;
vma               231 arch/sh/kernel/cpu/sh4/sq.c 		vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK));
vma               232 arch/sh/kernel/cpu/sh4/sq.c 		if (!vma) {
vma               378 arch/sh/kernel/smp.c 	struct vm_area_struct *vma;
vma               387 arch/sh/kernel/smp.c 	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
vma               390 arch/sh/kernel/smp.c void flush_tlb_range(struct vm_area_struct *vma,
vma               393 arch/sh/kernel/smp.c 	struct mm_struct *mm = vma->vm_mm;
vma               399 arch/sh/kernel/smp.c 		fd.vma = vma;
vma               409 arch/sh/kernel/smp.c 	local_flush_tlb_range(vma, start, end);
vma               433 arch/sh/kernel/smp.c 	local_flush_tlb_page(fd->vma, fd->addr1);
vma               436 arch/sh/kernel/smp.c void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
vma               439 arch/sh/kernel/smp.c 	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
vma               440 arch/sh/kernel/smp.c 	    (current->mm != vma->vm_mm)) {
vma               443 arch/sh/kernel/smp.c 		fd.vma = vma;
vma               450 arch/sh/kernel/smp.c 				cpu_context(i, vma->vm_mm) = 0;
vma               452 arch/sh/kernel/smp.c 	local_flush_tlb_page(vma, page);
vma                60 arch/sh/kernel/sys_sh.c 	struct vm_area_struct *vma;
vma                73 arch/sh/kernel/sys_sh.c 	vma = find_vma (current->mm, addr);
vma                74 arch/sh/kernel/sys_sh.c 	if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
vma                87 arch/sh/kernel/vsyscall/vsyscall.c const char *arch_vma_name(struct vm_area_struct *vma)
vma                89 arch/sh/kernel/vsyscall/vsyscall.c 	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
vma               207 arch/sh/mm/cache-sh4.c 	struct vm_area_struct *vma;
vma               217 arch/sh/mm/cache-sh4.c 	vma = data->vma;
vma               223 arch/sh/mm/cache-sh4.c 	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
vma               226 arch/sh/mm/cache-sh4.c 	pgd = pgd_offset(vma->vm_mm, address);
vma               235 arch/sh/mm/cache-sh4.c 	if ((vma->vm_mm == current->active_mm))
vma               256 arch/sh/mm/cache-sh4.c 	if (vma->vm_flags & VM_EXEC)
vma               279 arch/sh/mm/cache-sh4.c 	struct vm_area_struct *vma;
vma               282 arch/sh/mm/cache-sh4.c 	vma = data->vma;
vma               286 arch/sh/mm/cache-sh4.c 	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
vma               298 arch/sh/mm/cache-sh4.c 	if (vma->vm_flags & VM_EXEC)
vma                87 arch/sh/mm/cache-sh5.c static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
vma               113 arch/sh/mm/cache-sh5.c 	vma_asid = cpu_asid(cpu, vma->vm_mm);
vma               177 arch/sh/mm/cache-sh5.c 			struct vm_area_struct *vma;
vma               179 arch/sh/mm/cache-sh5.c 			vma = find_vma(mm, aligned_start);
vma               180 arch/sh/mm/cache-sh5.c 			if (!vma || (aligned_start <= vma->vm_end)) {
vma               185 arch/sh/mm/cache-sh5.c 			vma_end = vma->vm_end;
vma               186 arch/sh/mm/cache-sh5.c 			if (vma->vm_flags & VM_EXEC) {
vma               190 arch/sh/mm/cache-sh5.c 					sh64_icache_inv_user_page(vma, eaddr);
vma               194 arch/sh/mm/cache-sh5.c 			aligned_start = vma->vm_end; /* Skip to start of next region */
vma               528 arch/sh/mm/cache-sh5.c 	struct vm_area_struct *vma;
vma               531 arch/sh/mm/cache-sh5.c 	vma = data->vma;
vma               535 arch/sh/mm/cache-sh5.c 	sh64_dcache_purge_user_range(vma->vm_mm, start, end);
vma               536 arch/sh/mm/cache-sh5.c 	sh64_icache_inv_user_page_range(vma->vm_mm, start, end);
vma               551 arch/sh/mm/cache-sh5.c 	struct vm_area_struct *vma;
vma               554 arch/sh/mm/cache-sh5.c 	vma = data->vma;
vma               560 arch/sh/mm/cache-sh5.c 	if (vma->vm_flags & VM_EXEC)
vma               561 arch/sh/mm/cache-sh5.c 		sh64_icache_inv_user_page(vma, eaddr);
vma                60 arch/sh/mm/cache.c void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
vma                75 arch/sh/mm/cache.c 	if (vma->vm_flags & VM_EXEC)
vma                76 arch/sh/mm/cache.c 		flush_cache_page(vma, vaddr, page_to_pfn(page));
vma                79 arch/sh/mm/cache.c void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
vma                96 arch/sh/mm/cache.c 			unsigned long vaddr, struct vm_area_struct *vma)
vma               114 arch/sh/mm/cache.c 	    (vma->vm_flags & VM_EXEC))
vma               136 arch/sh/mm/cache.c void __update_cache(struct vm_area_struct *vma,
vma               193 arch/sh/mm/cache.c void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
vma               198 arch/sh/mm/cache.c 	data.vma = vma;
vma               205 arch/sh/mm/cache.c void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
vma               210 arch/sh/mm/cache.c 	data.vma = vma;
vma               228 arch/sh/mm/cache.c 	data.vma = NULL;
vma               236 arch/sh/mm/cache.c void flush_icache_page(struct vm_area_struct *vma, struct page *page)
vma               343 arch/sh/mm/fault.c static inline int access_error(int error_code, struct vm_area_struct *vma)
vma               347 arch/sh/mm/fault.c 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
vma               354 arch/sh/mm/fault.c 		     !(vma->vm_flags & VM_EXEC)))
vma               358 arch/sh/mm/fault.c 	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
vma               381 arch/sh/mm/fault.c 	struct vm_area_struct * vma;
vma               429 arch/sh/mm/fault.c 	vma = find_vma(mm, address);
vma               430 arch/sh/mm/fault.c 	if (unlikely(!vma)) {
vma               434 arch/sh/mm/fault.c 	if (likely(vma->vm_start <= address))
vma               436 arch/sh/mm/fault.c 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
vma               440 arch/sh/mm/fault.c 	if (unlikely(expand_stack(vma, address))) {
vma               450 arch/sh/mm/fault.c 	if (unlikely(access_error(error_code, vma))) {
vma               467 arch/sh/mm/fault.c 	fault = handle_mm_fault(vma, address, flags);
vma                38 arch/sh/mm/mmap.c 	struct vm_area_struct *vma;
vma                65 arch/sh/mm/mmap.c 		vma = find_vma(mm, addr);
vma                67 arch/sh/mm/mmap.c 		    (!vma || addr + len <= vm_start_gap(vma)))
vma                85 arch/sh/mm/mmap.c 	struct vm_area_struct *vma;
vma               115 arch/sh/mm/mmap.c 		vma = find_vma(mm, addr);
vma               117 arch/sh/mm/mmap.c 		    (!vma || addr + len <= vm_start_gap(vma)))
vma                48 arch/sh/mm/nommu.c void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma                54 arch/sh/mm/nommu.c void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
vma                73 arch/sh/mm/nommu.c void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
vma                18 arch/sh/mm/tlb-pteaex.c void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
vma                25 arch/sh/mm/tlb-pteaex.c 	if (vma && current->active_mm != vma->vm_mm)
vma                28 arch/sh/mm/tlb-sh3.c void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
vma                35 arch/sh/mm/tlb-sh3.c 	if (vma && current->active_mm != vma->vm_mm)
vma                16 arch/sh/mm/tlb-sh4.c void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
vma                23 arch/sh/mm/tlb-sh4.c 	if (vma && current->active_mm != vma->vm_mm)
vma               150 arch/sh/mm/tlb-sh5.c void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
vma               186 arch/sh/mm/tlb-sh5.c void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
vma                20 arch/sh/mm/tlb-urb.c void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
vma                47 arch/sh/mm/tlb-urb.c 	__update_tlb(vma, addr, pte);
vma                15 arch/sh/mm/tlbflush_32.c void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
vma                19 arch/sh/mm/tlbflush_32.c 	if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {
vma                24 arch/sh/mm/tlbflush_32.c 		asid = cpu_asid(cpu, vma->vm_mm);
vma                28 arch/sh/mm/tlbflush_32.c 		if (vma->vm_mm != current->mm) {
vma                39 arch/sh/mm/tlbflush_32.c void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma                42 arch/sh/mm/tlbflush_32.c 	struct mm_struct *mm = vma->vm_mm;
vma                67 arch/sh/mm/tlbflush_64.c void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
vma                71 arch/sh/mm/tlbflush_64.c 	if (vma->vm_mm) {
vma                79 arch/sh/mm/tlbflush_64.c void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma                88 arch/sh/mm/tlbflush_64.c 	mm = vma->vm_mm;
vma                13 arch/sparc/include/asm/cacheflush_32.h #define flush_cache_range(vma,start,end) \
vma                14 arch/sparc/include/asm/cacheflush_32.h 	sparc32_cachetlb_ops->cache_range(vma, start, end)
vma                15 arch/sparc/include/asm/cacheflush_32.h #define flush_cache_page(vma,addr,pfn) \
vma                16 arch/sparc/include/asm/cacheflush_32.h 	sparc32_cachetlb_ops->cache_page(vma, addr)
vma                18 arch/sparc/include/asm/cacheflush_32.h #define flush_icache_page(vma, pg)		do { } while (0)
vma                20 arch/sparc/include/asm/cacheflush_32.h #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
vma                22 arch/sparc/include/asm/cacheflush_32.h #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
vma                24 arch/sparc/include/asm/cacheflush_32.h 		flush_cache_page(vma, vaddr, page_to_pfn(page));\
vma                27 arch/sparc/include/asm/cacheflush_32.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vma                29 arch/sparc/include/asm/cacheflush_32.h 		flush_cache_page(vma, vaddr, page_to_pfn(page));\
vma                24 arch/sparc/include/asm/cacheflush_64.h #define flush_cache_range(vma, start, end) \
vma                25 arch/sparc/include/asm/cacheflush_64.h 	flush_cache_mm((vma)->vm_mm)
vma                26 arch/sparc/include/asm/cacheflush_64.h #define flush_cache_page(vma, page, pfn) \
vma                27 arch/sparc/include/asm/cacheflush_64.h 	flush_cache_mm((vma)->vm_mm)
vma                51 arch/sparc/include/asm/cacheflush_64.h #define flush_icache_page(vma, pg)	do { } while(0)
vma                52 arch/sparc/include/asm/cacheflush_64.h #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
vma                58 arch/sparc/include/asm/cacheflush_64.h #define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
vma                60 arch/sparc/include/asm/cacheflush_64.h 		flush_cache_page(vma, vaddr, page_to_pfn(page));	\
vma                62 arch/sparc/include/asm/cacheflush_64.h 		flush_ptrace_access(vma, page, vaddr, src, len, 0);	\
vma                65 arch/sparc/include/asm/cacheflush_64.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) 		\
vma                67 arch/sparc/include/asm/cacheflush_64.h 		flush_cache_page(vma, vaddr, page_to_pfn(page));	\
vma                69 arch/sparc/include/asm/cacheflush_64.h 		flush_ptrace_access(vma, page, vaddr, dst, len, 1);	\
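Across alpha, sparc and the other arches indexed here, copy_to_user_page() has one shape: flush around a plain memcpy so a ptrace write stays coherent with the user's cache view. A sketch of that shape, simplified from the sparc64 macros above (the flush calls are the per-arch variable part):

	/* Sketch: flush, copy, flush-for-ptrace. On arches with coherent
	 * caches both flushes compile away to nothing. */
	#define copy_to_user_page_sketch(vma, page, vaddr, dst, src, len)	\
		do {								\
			flush_cache_page(vma, vaddr, page_to_pfn(page));	\
			memcpy(dst, src, len);					\
			flush_ptrace_access(vma, page, vaddr, src, len, 0);	\
		} while (0)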
vma                10 arch/sparc/include/asm/fb.h static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
vma                14 arch/sparc/include/asm/fb.h 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma                30 arch/sparc/include/asm/hugetlb.h static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
vma                44 arch/sparc/include/asm/hugetlb.h static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
vma                50 arch/sparc/include/asm/hugetlb.h 		set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
vma                51 arch/sparc/include/asm/hugetlb.h 		flush_tlb_page(vma, addr);
vma               207 arch/sparc/include/asm/leon.h void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page);
vma                65 arch/sparc/include/asm/mman.h 			struct vm_area_struct *vma;
vma                67 arch/sparc/include/asm/mman.h 			vma = find_vma(current->mm, addr);
vma                68 arch/sparc/include/asm/mman.h 			if (vma) {
vma                72 arch/sparc/include/asm/mman.h 				if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
vma                83 arch/sparc/include/asm/mman.h 				if (vma->vm_flags & VM_MERGEABLE)
vma                54 arch/sparc/include/asm/page_64.h 			unsigned long vaddr, struct vm_area_struct *vma);
vma               345 arch/sparc/include/asm/pgtable_32.h #define update_mmu_cache(vma, address, ptep) do { } while (0)
vma               413 arch/sparc/include/asm/pgtable_32.h static inline int io_remap_pfn_range(struct vm_area_struct *vma,
vma               423 arch/sparc/include/asm/pgtable_32.h 	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
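Both sparc pgtable headers define io_remap_pfn_range() as a thin wrapper that folds the bus/IO-space offset into the frame number before handing the range to the generic remap_pfn_range(). A hedged sketch of the idea (the shift and space-id encoding are illustrative, not the exact sparc layout):

	static inline int io_remap_pfn_range_sketch(struct vm_area_struct *vma,
						    unsigned long from,
						    unsigned long pfn,
						    unsigned long size,
						    pgprot_t prot)
	{
		/* The top bits of a sparc "pfn" encode an I/O space id; fold
		 * them back in above the physical address, then fall through
		 * to the generic mapper. */
		unsigned long long offset = ((unsigned long long)pfn) << PAGE_SHIFT;
		unsigned long long phys_base = offset;	/* | (space << 32) on sparc */

		return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT,
				       size, prot);
	}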
vma               380 arch/sparc/include/asm/pgtable_64.h extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
vma               994 arch/sparc/include/asm/pgtable_64.h void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
vma               998 arch/sparc/include/asm/pgtable_64.h extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
vma              1034 arch/sparc/include/asm/pgtable_64.h void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
vma              1037 arch/sparc/include/asm/pgtable_64.h int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma,
vma              1042 arch/sparc/include/asm/pgtable_64.h 				     struct vm_area_struct *vma,
vma              1054 arch/sparc/include/asm/pgtable_64.h 		adi_restore_tags(mm, vma, addr, pte);
vma              1059 arch/sparc/include/asm/pgtable_64.h 				 struct vm_area_struct *vma,
vma              1063 arch/sparc/include/asm/pgtable_64.h 		return adi_save_tags(mm, vma, addr, oldpte);
vma              1067 arch/sparc/include/asm/pgtable_64.h static inline int io_remap_pfn_range(struct vm_area_struct *vma,
vma              1077 arch/sparc/include/asm/pgtable_64.h 	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
vma                26 arch/sparc/include/asm/tlb_64.h #define tlb_start_vma(tlb, vma) do { } while (0)
vma                27 arch/sparc/include/asm/tlb_64.h #define tlb_end_vma(tlb, vma)	do { } while (0)
vma                11 arch/sparc/include/asm/tlbflush_32.h #define flush_tlb_range(vma, start, end) \
vma                12 arch/sparc/include/asm/tlbflush_32.h 	sparc32_cachetlb_ops->tlb_range(vma, start, end)
vma                13 arch/sparc/include/asm/tlbflush_32.h #define flush_tlb_page(vma, addr) \
vma                14 arch/sparc/include/asm/tlbflush_32.h 	sparc32_cachetlb_ops->tlb_page(vma, addr)
vma                30 arch/sparc/include/asm/tlbflush_64.h static inline void flush_tlb_page(struct vm_area_struct *vma,
vma                35 arch/sparc/include/asm/tlbflush_64.h static inline void flush_tlb_range(struct vm_area_struct *vma,
vma               125 arch/sparc/kernel/adi_64.c 				   struct vm_area_struct *vma,
vma               157 arch/sparc/kernel/adi_64.c 				    struct vm_area_struct *vma,
vma               327 arch/sparc/kernel/adi_64.c void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
vma               338 arch/sparc/kernel/adi_64.c 	tag_desc = find_tag_store(mm, vma, addr);
vma               370 arch/sparc/kernel/adi_64.c int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma,
vma               377 arch/sparc/kernel/adi_64.c 	tag_desc = alloc_tag_store(mm, vma, addr);
vma               763 arch/sparc/kernel/pci.c static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
vma               776 arch/sparc/kernel/pci.c 	user_offset = vma->vm_pgoff << PAGE_SHIFT;
vma               777 arch/sparc/kernel/pci.c 	user_size = vma->vm_end - vma->vm_start;
vma               784 arch/sparc/kernel/pci.c 		vma->vm_pgoff = (pbm->io_space.start +
vma               787 arch/sparc/kernel/pci.c 		vma->vm_pgoff = (pbm->mem_space.start +
vma               805 arch/sparc/kernel/pci.c 				  struct vm_area_struct *vma,
vma               815 arch/sparc/kernel/pci.c 	err = __pci_mmap_make_offset_bus(pdev, vma, mmap_state);
vma               828 arch/sparc/kernel/pci.c 	user_paddr = vma->vm_pgoff << PAGE_SHIFT;
vma               829 arch/sparc/kernel/pci.c 	user_size = vma->vm_end - vma->vm_start;
vma               872 arch/sparc/kernel/pci.c static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
vma               887 arch/sparc/kernel/pci.c 			struct vm_area_struct *vma,
vma               892 arch/sparc/kernel/pci.c 	ret = __pci_mmap_make_offset(dev, vma, mmap_state);
vma               896 arch/sparc/kernel/pci.c 	__pci_mmap_set_pgprot(dev, vma, mmap_state);
vma               898 arch/sparc/kernel/pci.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               899 arch/sparc/kernel/pci.c 	ret = io_remap_pfn_range(vma, vma->vm_start,
vma               900 arch/sparc/kernel/pci.c 				 vma->vm_pgoff,
vma               901 arch/sparc/kernel/pci.c 				 vma->vm_end - vma->vm_start,
vma               902 arch/sparc/kernel/pci.c 				 vma->vm_page_prot);
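The sparc pci.c path above is the canonical PCI resource mmap: rewrite the user's vm_pgoff into a bus frame number, force an uncached protection, then map the whole range in one io_remap_pfn_range() call. A condensed sketch with the error paths and the per-resource offset fixup elided:

	/* Condensed from the sparc flow above; vm_pgoff has already been
	 * rewritten to the bus pfn by the __pci_mmap_make_offset() step. */
	static int pci_mmap_sketch(struct vm_area_struct *vma)
	{
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					  vma->vm_end - vma->vm_start,
					  vma->vm_page_prot);
	}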
vma               108 arch/sparc/kernel/ptrace_64.c void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
vma                93 arch/sparc/kernel/sys_sparc_64.c 	struct vm_area_struct * vma;
vma               123 arch/sparc/kernel/sys_sparc_64.c 		vma = find_vma(mm, addr);
vma               125 arch/sparc/kernel/sys_sparc_64.c 		    (!vma || addr + len <= vm_start_gap(vma)))
vma               152 arch/sparc/kernel/sys_sparc_64.c 	struct vm_area_struct *vma;
vma               186 arch/sparc/kernel/sys_sparc_64.c 		vma = find_vma(mm, addr);
vma               188 arch/sparc/kernel/sys_sparc_64.c 		    (!vma || addr + len <= vm_start_gap(vma)))
vma               163 arch/sparc/mm/fault_32.c 	struct vm_area_struct *vma;
vma               204 arch/sparc/mm/fault_32.c 	vma = find_vma(mm, address);
vma               205 arch/sparc/mm/fault_32.c 	if (!vma)
vma               207 arch/sparc/mm/fault_32.c 	if (vma->vm_start <= address)
vma               209 arch/sparc/mm/fault_32.c 	if (!(vma->vm_flags & VM_GROWSDOWN))
vma               211 arch/sparc/mm/fault_32.c 	if (expand_stack(vma, address))
vma               220 arch/sparc/mm/fault_32.c 		if (!(vma->vm_flags & VM_WRITE))
vma               224 arch/sparc/mm/fault_32.c 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
vma               238 arch/sparc/mm/fault_32.c 	fault = handle_mm_fault(vma, address, flags);
vma               380 arch/sparc/mm/fault_32.c 	struct vm_area_struct *vma;
vma               389 arch/sparc/mm/fault_32.c 	vma = find_vma(mm, address);
vma               390 arch/sparc/mm/fault_32.c 	if (!vma)
vma               392 arch/sparc/mm/fault_32.c 	if (vma->vm_start <= address)
vma               394 arch/sparc/mm/fault_32.c 	if (!(vma->vm_flags & VM_GROWSDOWN))
vma               396 arch/sparc/mm/fault_32.c 	if (expand_stack(vma, address))
vma               401 arch/sparc/mm/fault_32.c 		if (!(vma->vm_flags & VM_WRITE))
vma               405 arch/sparc/mm/fault_32.c 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
vma               408 arch/sparc/mm/fault_32.c 	switch (handle_mm_fault(vma, address, flags)) {
vma               265 arch/sparc/mm/fault_64.c 	struct vm_area_struct *vma;
vma               332 arch/sparc/mm/fault_64.c 	vma = find_vma(mm, address);
vma               333 arch/sparc/mm/fault_64.c 	if (!vma)
vma               346 arch/sparc/mm/fault_64.c 	    (vma->vm_flags & VM_WRITE) != 0) {
vma               365 arch/sparc/mm/fault_64.c 	if (vma->vm_start <= address)
vma               367 arch/sparc/mm/fault_64.c 	if (!(vma->vm_flags & VM_GROWSDOWN))
vma               383 arch/sparc/mm/fault_64.c 	if (expand_stack(vma, address))
vma               395 arch/sparc/mm/fault_64.c 	if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
vma               403 arch/sparc/mm/fault_64.c 		if (!(vma->vm_flags & VM_WRITE))
vma               410 arch/sparc/mm/fault_64.c 		    (vma->vm_flags & VM_EXEC) != 0 &&
vma               411 arch/sparc/mm/fault_64.c 		    vma->vm_file != NULL)
vma               418 arch/sparc/mm/fault_64.c 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
vma               422 arch/sparc/mm/fault_64.c 	fault = handle_mm_fault(vma, address, flags);
vma               103 arch/sparc/mm/hugetlbpage.c 	struct vm_area_struct *vma;
vma               122 arch/sparc/mm/hugetlbpage.c 		vma = find_vma(mm, addr);
vma               124 arch/sparc/mm/hugetlbpage.c 		    (!vma || addr + len <= vm_start_gap(vma)))
vma               181 arch/sparc/mm/hugetlbpage.c pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
vma               184 arch/sparc/mm/hugetlbpage.c 	unsigned int shift = huge_page_shift(hstate_vma(vma));
vma               192 arch/sparc/mm/hugetlbpage.c 	if (vma->vm_flags & VM_SPARC_ADI)
vma               416 arch/sparc/mm/init_64.c void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
vma               430 arch/sparc/mm/init_64.c 	mm = vma->vm_mm;
vma               443 arch/sparc/mm/init_64.c 		if (is_vm_hugetlb_page(vma))
vma               444 arch/sparc/mm/init_64.c 			hugepage_size = huge_page_size(hstate_vma(vma));
vma              2940 arch/sparc/mm/init_64.c void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
vma              2959 arch/sparc/mm/init_64.c 	mm = vma->vm_mm;
vma              3129 arch/sparc/mm/init_64.c 	unsigned long vaddr, struct vm_area_struct *vma)
vma              3142 arch/sparc/mm/init_64.c 	if (vma->vm_flags & VM_SPARC_ADI) {
vma               194 arch/sparc/mm/leon_mm.c void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page)
vma               196 arch/sparc/mm/leon_mm.c 	if (vma->vm_flags & VM_EXEC)
vma               282 arch/sparc/mm/leon_mm.c static void leon_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
vma               284 arch/sparc/mm/leon_mm.c 	leon_flush_pcache_all(vma, page);
vma               287 arch/sparc/mm/leon_mm.c static void leon_flush_cache_range(struct vm_area_struct *vma,
vma               299 arch/sparc/mm/leon_mm.c static void leon_flush_tlb_page(struct vm_area_struct *vma,
vma               305 arch/sparc/mm/leon_mm.c static void leon_flush_tlb_range(struct vm_area_struct *vma,
vma               578 arch/sparc/mm/srmmu.c extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
vma               579 arch/sparc/mm/srmmu.c extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
vma               585 arch/sparc/mm/srmmu.c extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
vma               586 arch/sparc/mm/srmmu.c extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
vma               592 arch/sparc/mm/srmmu.c extern void swift_flush_cache_range(struct vm_area_struct *vma,
vma               594 arch/sparc/mm/srmmu.c extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
vma               600 arch/sparc/mm/srmmu.c extern void swift_flush_tlb_range(struct vm_area_struct *vma,
vma               602 arch/sparc/mm/srmmu.c extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
vma               605 arch/sparc/mm/srmmu.c void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
vma               610 arch/sparc/mm/srmmu.c 	if ((ctx1 = vma->vm_mm->context) != -1) {
vma               644 arch/sparc/mm/srmmu.c extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
vma               646 arch/sparc/mm/srmmu.c extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
vma               654 arch/sparc/mm/srmmu.c extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma               656 arch/sparc/mm/srmmu.c extern void viking_flush_tlb_page(struct vm_area_struct *vma,
vma               660 arch/sparc/mm/srmmu.c extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma               662 arch/sparc/mm/srmmu.c extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
vma               668 arch/sparc/mm/srmmu.c extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
vma               669 arch/sparc/mm/srmmu.c extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
vma               675 arch/sparc/mm/srmmu.c extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
vma               676 arch/sparc/mm/srmmu.c extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
vma              1263 arch/sparc/mm/srmmu.c static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
vma              1265 arch/sparc/mm/srmmu.c 	FLUSH_BEGIN(vma->vm_mm)
vma              1271 arch/sparc/mm/srmmu.c static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
vma              1273 arch/sparc/mm/srmmu.c 	FLUSH_BEGIN(vma->vm_mm)
vma              1275 arch/sparc/mm/srmmu.c 	if (vma->vm_flags & VM_EXEC)
vma              1314 arch/sparc/mm/srmmu.c static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
vma              1316 arch/sparc/mm/srmmu.c 	FLUSH_BEGIN(vma->vm_mm)
vma              1321 arch/sparc/mm/srmmu.c static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
vma              1323 arch/sparc/mm/srmmu.c 	FLUSH_BEGIN(vma->vm_mm)
vma              1701 arch/sparc/mm/srmmu.c static void smp_flush_cache_range(struct vm_area_struct *vma,
vma              1705 arch/sparc/mm/srmmu.c 	struct mm_struct *mm = vma->vm_mm;
vma              1713 arch/sparc/mm/srmmu.c 			    (unsigned long) vma, start, end);
vma              1714 arch/sparc/mm/srmmu.c 		local_ops->cache_range(vma, start, end);
vma              1718 arch/sparc/mm/srmmu.c static void smp_flush_tlb_range(struct vm_area_struct *vma,
vma              1722 arch/sparc/mm/srmmu.c 	struct mm_struct *mm = vma->vm_mm;
vma              1730 arch/sparc/mm/srmmu.c 			    (unsigned long) vma, start, end);
vma              1731 arch/sparc/mm/srmmu.c 		local_ops->tlb_range(vma, start, end);
vma              1735 arch/sparc/mm/srmmu.c static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
vma              1737 arch/sparc/mm/srmmu.c 	struct mm_struct *mm = vma->vm_mm;
vma              1745 arch/sparc/mm/srmmu.c 			    (unsigned long) vma, page);
vma              1746 arch/sparc/mm/srmmu.c 		local_ops->cache_page(vma, page);
vma              1750 arch/sparc/mm/srmmu.c static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
vma              1752 arch/sparc/mm/srmmu.c 	struct mm_struct *mm = vma->vm_mm;
vma              1760 arch/sparc/mm/srmmu.c 			    (unsigned long) vma, page);
vma              1761 arch/sparc/mm/srmmu.c 		local_ops->tlb_page(vma, page);
vma               228 arch/sparc/mm/tlb.c static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
vma               236 arch/sparc/mm/tlb.c 	__set_pmd_acct(vma->vm_mm, address, old, pmd);
vma               244 arch/sparc/mm/tlb.c pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
vma               250 arch/sparc/mm/tlb.c 	old = pmdp_establish(vma, address, pmdp, entry);
vma               251 arch/sparc/mm/tlb.c 	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
vma               260 arch/sparc/mm/tlb.c 		(vma->vm_mm)->context.thp_pte_count--;
vma               365 arch/sparc/vdso/vma.c 	struct vm_area_struct *vma;
vma               398 arch/sparc/vdso/vma.c 	vma = _install_special_mapping(mm,
vma               405 arch/sparc/vdso/vma.c 	if (IS_ERR(vma)) {
vma               406 arch/sparc/vdso/vma.c 		ret = PTR_ERR(vma);
vma               410 arch/sparc/vdso/vma.c 	vma = _install_special_mapping(mm,
vma               416 arch/sparc/vdso/vma.c 	if (IS_ERR(vma)) {
vma               417 arch/sparc/vdso/vma.c 		ret = PTR_ERR(vma);
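Both the sparc and x86 vdso setup code pair _install_special_mapping() with the IS_ERR()/PTR_ERR() convention, since it returns a vma pointer rather than an errno. A minimal sketch of one such mapping (address, length and the mapping object are placeholders supplied by the caller):

	#include <linux/mm.h>
	#include <linux/err.h>

	static int map_vdso_sketch(struct mm_struct *mm, unsigned long addr,
				   unsigned long len,
				   const struct vm_special_mapping *mapping)
	{
		struct vm_area_struct *vma;

		vma = _install_special_mapping(mm, addr, len,
					       VM_READ | VM_EXEC |
					       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
					       mapping);
		/* Failure comes back as an ERR_PTR, never NULL. */
		return IS_ERR(vma) ? PTR_ERR(vma) : 0;
	}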
vma                49 arch/um/drivers/mmapper_kern.c static int mmapper_mmap(struct file *file, struct vm_area_struct *vma)
vma                54 arch/um/drivers/mmapper_kern.c 	if (vma->vm_pgoff != 0)
vma                57 arch/um/drivers/mmapper_kern.c 	size = vma->vm_end - vma->vm_start;
vma                65 arch/um/drivers/mmapper_kern.c 	if (remap_pfn_range(vma, vma->vm_start, p_buf >> PAGE_SHIFT, size,
vma                66 arch/um/drivers/mmapper_kern.c 			    vma->vm_page_prot))
vma                29 arch/um/include/asm/mmu_context.h 				     struct vm_area_struct *vma)
vma                33 arch/um/include/asm/mmu_context.h static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
vma               349 arch/um/include/asm/pgtable.h #define update_mmu_cache(vma,address,ptep) do ; while (0)
vma                24 arch/um/include/asm/tlbflush.h extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, 
vma                26 arch/um/include/asm/tlbflush.h extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
vma               424 arch/um/kernel/tlb.c void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
vma               430 arch/um/kernel/tlb.c 	struct mm_struct *mm = vma->vm_mm;
vma               562 arch/um/kernel/tlb.c void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
vma               565 arch/um/kernel/tlb.c 	if (vma->vm_mm == NULL)
vma               567 arch/um/kernel/tlb.c 	else fix_range(vma->vm_mm, start, end, 0);
vma               579 arch/um/kernel/tlb.c 	struct vm_area_struct *vma = mm->mmap;
vma               581 arch/um/kernel/tlb.c 	while (vma != NULL) {
vma               582 arch/um/kernel/tlb.c 		fix_range(mm, vma->vm_start, vma->vm_end, 0);
vma               583 arch/um/kernel/tlb.c 		vma = vma->vm_next;
vma               590 arch/um/kernel/tlb.c 	struct vm_area_struct *vma = mm->mmap;
vma               592 arch/um/kernel/tlb.c 	while (vma != NULL) {
vma               593 arch/um/kernel/tlb.c 		fix_range(mm, vma->vm_start, vma->vm_end, 1);
vma               594 arch/um/kernel/tlb.c 		vma = vma->vm_next;
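The um flush_tlb_mm path shows the pre-maple-tree idiom for visiting every mapping in an address space: walk the singly linked vm_next chain starting at mm->mmap. A sketch (callback name illustrative; callers of this era hold mmap_sem):

	/* Visit each vma of an mm in address order via the vm_next chain. */
	static void for_each_vma_sketch(struct mm_struct *mm,
					void (*fn)(struct vm_area_struct *))
	{
		struct vm_area_struct *vma;

		for (vma = mm->mmap; vma != NULL; vma = vma->vm_next)
			fn(vma);
	}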
vma                29 arch/um/kernel/trap.c 	struct vm_area_struct *vma;
vma                50 arch/um/kernel/trap.c 	vma = find_vma(mm, address);
vma                51 arch/um/kernel/trap.c 	if (!vma)
vma                53 arch/um/kernel/trap.c 	else if (vma->vm_start <= address)
vma                55 arch/um/kernel/trap.c 	else if (!(vma->vm_flags & VM_GROWSDOWN))
vma                59 arch/um/kernel/trap.c 	else if (expand_stack(vma, address))
vma                65 arch/um/kernel/trap.c 		if (!(vma->vm_flags & VM_WRITE))
vma                70 arch/um/kernel/trap.c 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
vma                77 arch/um/kernel/trap.c 		fault = handle_mm_fault(vma, address, flags);
vma               123 arch/um/kernel/trap.c 	flush_tlb_page(vma, address);
vma               108 arch/unicore32/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
vma               128 arch/unicore32/include/asm/cacheflush.h extern void flush_cache_range(struct vm_area_struct *vma,
vma               130 arch/unicore32/include/asm/cacheflush.h extern void flush_cache_page(struct vm_area_struct *vma,
vma               140 arch/unicore32/include/asm/cacheflush.h #define flush_cache_user_range(vma, start, end) \
vma               173 arch/unicore32/include/asm/cacheflush.h #define flush_icache_user_range(vma, page, addr, len)	\
vma               180 arch/unicore32/include/asm/cacheflush.h #define flush_icache_page(vma, page)	do { } while (0)
vma                93 arch/unicore32/include/asm/mmu_context.h 				     struct vm_area_struct *vma)
vma                97 arch/unicore32/include/asm/mmu_context.h static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
vma                85 arch/unicore32/include/asm/tlbflush.h local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
vma                87 arch/unicore32/include/asm/tlbflush.h 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
vma               167 arch/unicore32/include/asm/tlbflush.h #define local_flush_tlb_range(vma, start, end)	\
vma               168 arch/unicore32/include/asm/tlbflush.h 	__cpu_flush_user_tlb_range(start, end, vma)
vma               184 arch/unicore32/include/asm/tlbflush.h extern void update_mmu_cache(struct vm_area_struct *vma,
vma               316 arch/unicore32/kernel/process.c const char *arch_vma_name(struct vm_area_struct *vma)
vma               318 arch/unicore32/kernel/process.c 	return (vma->vm_start == 0xffff0000) ? "[vectors]" : NULL;
vma               150 arch/unicore32/mm/fault.c static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
vma               159 arch/unicore32/mm/fault.c 	return vma->vm_flags & mask ? false : true;
vma               165 arch/unicore32/mm/fault.c 	struct vm_area_struct *vma;
vma               168 arch/unicore32/mm/fault.c 	vma = find_vma(mm, addr);
vma               170 arch/unicore32/mm/fault.c 	if (unlikely(!vma))
vma               172 arch/unicore32/mm/fault.c 	if (unlikely(vma->vm_start > addr))
vma               180 arch/unicore32/mm/fault.c 	if (access_error(fsr, vma)) {
vma               189 arch/unicore32/mm/fault.c 	fault = handle_mm_fault(vma, addr & PAGE_MASK, flags);
vma               193 arch/unicore32/mm/fault.c 	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
vma                20 arch/unicore32/mm/flush.c void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
vma                23 arch/unicore32/mm/flush.c 	if (vma->vm_flags & VM_EXEC)
vma                27 arch/unicore32/mm/flush.c void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr,
vma                32 arch/unicore32/mm/flush.c static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
vma                36 arch/unicore32/mm/flush.c 	if (vma->vm_flags & VM_EXEC) {
vma                50 arch/unicore32/mm/flush.c void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
vma                55 arch/unicore32/mm/flush.c 	flush_ptrace_access(vma, page, uaddr, dst, len);
vma               489 arch/unicore32/mm/mmu.c void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
vma               511 arch/unicore32/mm/mmu.c 		if (vma->vm_flags & VM_EXEC)
vma               147 arch/x86/boot/tools/build.c static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset)
vma               168 arch/x86/boot/tools/build.c 			put_unaligned_le32(vma, section + 0xc);
vma                43 arch/x86/entry/vdso/vma.c 		      struct vm_area_struct *vma, struct vm_fault *vmf)
vma                45 arch/x86/entry/vdso/vma.c 	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
vma                88 arch/x86/entry/vdso/vma.c 		      struct vm_area_struct *vma, struct vm_fault *vmf)
vma                90 arch/x86/entry/vdso/vma.c 	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
vma               110 arch/x86/entry/vdso/vma.c 		return vmf_insert_pfn(vma, vmf->address,
vma               116 arch/x86/entry/vdso/vma.c 			return vmf_insert_pfn_prot(vma, vmf->address,
vma               118 arch/x86/entry/vdso/vma.c 					pgprot_decrypted(vma->vm_page_prot));
vma               124 arch/x86/entry/vdso/vma.c 			return vmf_insert_pfn(vma, vmf->address,
vma               149 arch/x86/entry/vdso/vma.c 	struct vm_area_struct *vma;
vma               168 arch/x86/entry/vdso/vma.c 	vma = _install_special_mapping(mm,
vma               175 arch/x86/entry/vdso/vma.c 	if (IS_ERR(vma)) {
vma               176 arch/x86/entry/vdso/vma.c 		ret = PTR_ERR(vma);
vma               180 arch/x86/entry/vdso/vma.c 	vma = _install_special_mapping(mm,
vma               187 arch/x86/entry/vdso/vma.c 	if (IS_ERR(vma)) {
vma               188 arch/x86/entry/vdso/vma.c 		ret = PTR_ERR(vma);
vma               256 arch/x86/entry/vdso/vma.c 	struct vm_area_struct *vma;
vma               266 arch/x86/entry/vdso/vma.c 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma               267 arch/x86/entry/vdso/vma.c 		if (vma_is_special_mapping(vma, &vdso_mapping) ||
vma               268 arch/x86/entry/vdso/vma.c 				vma_is_special_mapping(vma, &vvar_mapping)) {
vma               301 arch/x86/entry/vsyscall/vsyscall_64.c static const char *gate_vma_name(struct vm_area_struct *vma)
vma               329 arch/x86/entry/vsyscall/vsyscall_64.c 	struct vm_area_struct *vma = get_gate_vma(mm);
vma               331 arch/x86/entry/vsyscall/vsyscall_64.c 	if (!vma)
vma               334 arch/x86/entry/vsyscall/vsyscall_64.c 	return (addr >= vma->vm_start) && (addr < vma->vm_end);
vma                 9 arch/x86/include/asm/fb.h static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
vma                14 arch/x86/include/asm/fb.h 	prot = pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK;
vma                16 arch/x86/include/asm/fb.h 		pgprot_val(vma->vm_page_prot) =
vma               276 arch/x86/include/asm/mmu_context.h 		struct vm_area_struct *vma)
vma               314 arch/x86/include/asm/mmu_context.h static inline bool vma_is_foreign(struct vm_area_struct *vma)
vma               323 arch/x86/include/asm/mmu_context.h 	if (current->mm != vma->vm_mm)
vma               329 arch/x86/include/asm/mmu_context.h static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
vma               336 arch/x86/include/asm/mmu_context.h 	if (foreign || vma_is_foreign(vma))
vma               338 arch/x86/include/asm/mmu_context.h 	return __pkru_allows_pkey(vma_pkey(vma), write);
vma                37 arch/x86/include/asm/page.h #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
vma                38 arch/x86/include/asm/page.h 	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
vma               413 arch/x86/include/asm/paravirt.h static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
vma               418 arch/x86/include/asm/paravirt.h 	ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);
vma               423 arch/x86/include/asm/paravirt.h static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
vma               429 arch/x86/include/asm/paravirt.h 		pv_ops.mmu.ptep_modify_prot_commit(vma, addr, ptep, pte);
vma               432 arch/x86/include/asm/paravirt.h 			    vma, addr, ptep, pte.pte);
vma               253 arch/x86/include/asm/paravirt_types.h 	pte_t (*ptep_modify_prot_start)(struct vm_area_struct *vma, unsigned long addr,
vma               255 arch/x86/include/asm/paravirt_types.h 	void (*ptep_modify_prot_commit)(struct vm_area_struct *vma, unsigned long addr,
vma               189 arch/x86/include/asm/pgtable-3level.h static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
vma              1111 arch/x86/include/asm/pgtable.h extern int ptep_set_access_flags(struct vm_area_struct *vma,
vma              1116 arch/x86/include/asm/pgtable.h extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
vma              1120 arch/x86/include/asm/pgtable.h extern int ptep_clear_flush_young(struct vm_area_struct *vma,
vma              1156 arch/x86/include/asm/pgtable.h #define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
vma              1161 arch/x86/include/asm/pgtable.h extern int pmdp_set_access_flags(struct vm_area_struct *vma,
vma              1164 arch/x86/include/asm/pgtable.h extern int pudp_set_access_flags(struct vm_area_struct *vma,
vma              1169 arch/x86/include/asm/pgtable.h extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
vma              1171 arch/x86/include/asm/pgtable.h extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
vma              1175 arch/x86/include/asm/pgtable.h extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
vma              1214 arch/x86/include/asm/pgtable.h static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
vma              1331 arch/x86/include/asm/pgtable.h static inline void update_mmu_cache(struct vm_area_struct *vma,
vma              1335 arch/x86/include/asm/pgtable.h static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
vma              1339 arch/x86/include/asm/pgtable.h static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
vma                30 arch/x86/include/asm/pkeys.h extern int __arch_override_mprotect_pkey(struct vm_area_struct *vma,
vma                32 arch/x86/include/asm/pkeys.h static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma,
vma                38 arch/x86/include/asm/pkeys.h 	return __arch_override_mprotect_pkey(vma, prot, pkey);
vma               124 arch/x86/include/asm/pkeys.h static inline int vma_pkey(struct vm_area_struct *vma)
vma               129 arch/x86/include/asm/pkeys.h 	return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
vma                 5 arch/x86/include/asm/tlb.h #define tlb_start_vma(tlb, vma) do { } while (0)
vma                 6 arch/x86/include/asm/tlb.h #define tlb_end_vma(tlb, vma) do { } while (0)
vma               571 arch/x86/include/asm/tlbflush.h #define flush_tlb_range(vma, start, end)				\
vma               572 arch/x86/include/asm/tlbflush.h 	flush_tlb_mm_range((vma)->vm_mm, start, end,			\
vma               573 arch/x86/include/asm/tlbflush.h 			   ((vma)->vm_flags & VM_HUGETLB)		\
vma               574 arch/x86/include/asm/tlbflush.h 				? huge_page_shift(hstate_vma(vma))	\
vma               583 arch/x86/include/asm/tlbflush.h static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
vma               585 arch/x86/include/asm/tlbflush.h 	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
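On x86 both flush_tlb_page() and flush_tlb_range() funnel into flush_tlb_mm_range(); the range macro above also derives a stride from the vma so hugetlb ranges are flushed one huge page at a time. A sketch of the single-page case written out as a function:

	static inline void flush_tlb_page_sketch(struct vm_area_struct *vma,
						 unsigned long addr)
	{
		/* One base page: stride is PAGE_SHIFT, and since no page
		 * tables were freed the paging-structure caches can stay. */
		flush_tlb_mm_range(vma->vm_mm, addr, addr + PAGE_SIZE,
				   PAGE_SHIFT, false);
	}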
vma              1471 arch/x86/kernel/cpu/resctrl/pseudo_lock.c static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
vma              1473 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	unsigned long vsize = vma->vm_end - vma->vm_start;
vma              1474 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
vma              1519 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	if (!(vma->vm_flags & VM_SHARED)) {
vma              1531 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff,
vma              1532 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 			    vsize, vma->vm_page_prot)) {
vma              1536 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	vma->vm_ops = &pseudo_mmap_ops;
vma               136 arch/x86/kernel/sys_x86_64.c 	struct vm_area_struct *vma;
vma               154 arch/x86/kernel/sys_x86_64.c 		vma = find_vma(mm, addr);
vma               156 arch/x86/kernel/sys_x86_64.c 		    (!vma || addr + len <= vm_start_gap(vma)))
vma               178 arch/x86/kernel/sys_x86_64.c 	struct vm_area_struct *vma;
vma               205 arch/x86/kernel/sys_x86_64.c 		vma = find_vma(mm, addr);
vma               206 arch/x86/kernel/sys_x86_64.c 		if (!vma || addr + len <= vm_start_gap(vma))
vma               166 arch/x86/kernel/vm86_32.c 	struct vm_area_struct *vma;
vma               188 arch/x86/kernel/vm86_32.c 		vma = find_vma(mm, 0xA0000);
vma               189 arch/x86/kernel/vm86_32.c 		split_huge_pmd(vma, pmd, 0xA0000);
vma               148 arch/x86/kvm/paging_tmpl.h 		struct vm_area_struct *vma;
vma               154 arch/x86/kvm/paging_tmpl.h 		vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
vma               155 arch/x86/kvm/paging_tmpl.h 		if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
vma               159 arch/x86/kvm/paging_tmpl.h 		pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
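The KVM hunk computes a pfn arithmetically because a VM_PFNMAP mapping has no struct page behind it: vm_pgoff holds the first frame of the mapping, and the faulting address's page offset within the vma is added to it. For example, with vm_start = 0x7f0000000000, vm_pgoff = 0x100 and vaddr two pages into the vma, the result is pfn 0x102. A sketch:

	/* Translate a user address inside a VM_PFNMAP vma to its pfn. */
	static unsigned long pfnmap_vaddr_to_pfn(struct vm_area_struct *vma,
						 unsigned long vaddr)
	{
		return ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	}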
vma               965 arch/x86/mm/fault.c 		struct vm_area_struct *vma)
vma               975 arch/x86/mm/fault.c 	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
vma               983 arch/x86/mm/fault.c 		      unsigned long address, struct vm_area_struct *vma)
vma               990 arch/x86/mm/fault.c 	if (bad_area_access_from_pkeys(error_code, vma)) {
vma              1011 arch/x86/mm/fault.c 		u32 pkey = vma_pkey(vma);
vma              1190 arch/x86/mm/fault.c access_error(unsigned long error_code, struct vm_area_struct *vma)
vma              1208 arch/x86/mm/fault.c 	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
vma              1214 arch/x86/mm/fault.c 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
vma              1224 arch/x86/mm/fault.c 	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
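Setting pkeys aside, x86's access_error() reduces to flag tests against the vma: a write needs VM_WRITE, and anything else needs at least one of VM_READ, VM_EXEC or VM_WRITE. A sketch of that non-pkey core:

	/* Non-pkey core of access_error(): true means reject the access. */
	static bool access_error_sketch(struct vm_area_struct *vma,
					bool is_write)
	{
		if (is_write)
			return !(vma->vm_flags & VM_WRITE);

		return !(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE));
	}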
vma              1308 arch/x86/mm/fault.c 	struct vm_area_struct *vma;
vma              1424 arch/x86/mm/fault.c 	vma = find_vma(mm, address);
vma              1425 arch/x86/mm/fault.c 	if (unlikely(!vma)) {
vma              1429 arch/x86/mm/fault.c 	if (likely(vma->vm_start <= address))
vma              1431 arch/x86/mm/fault.c 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
vma              1435 arch/x86/mm/fault.c 	if (unlikely(expand_stack(vma, address))) {
vma              1445 arch/x86/mm/fault.c 	if (unlikely(access_error(hw_error_code, vma))) {
vma              1446 arch/x86/mm/fault.c 		bad_area_access_error(regs, hw_error_code, address, vma);
vma              1463 arch/x86/mm/fault.c 	fault = handle_mm_fault(vma, address, flags);
vma                32 arch/x86/mm/hugetlbpage.c 	struct vm_area_struct *vma;
vma                34 arch/x86/mm/hugetlbpage.c 	vma = find_vma(mm, addr);
vma                35 arch/x86/mm/hugetlbpage.c 	if (!vma || !is_vm_hugetlb_page(vma))
vma                38 arch/x86/mm/hugetlbpage.c 	pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
vma               149 arch/x86/mm/hugetlbpage.c 	struct vm_area_struct *vma;
vma               173 arch/x86/mm/hugetlbpage.c 		vma = find_vma(mm, addr);
vma               174 arch/x86/mm/hugetlbpage.c 		if (!vma || addr + len <= vm_start_gap(vma))
vma               164 arch/x86/mm/mmap.c const char *arch_vma_name(struct vm_area_struct *vma)
vma               166 arch/x86/mm/mmap.c 	if (vma->vm_flags & VM_MPX)
vma               627 arch/x86/mm/mpx.c 	struct vm_area_struct *vma;
vma               652 arch/x86/mm/mpx.c 	vma = find_vma(mm, start);
vma               653 arch/x86/mm/mpx.c 	if (!vma || vma->vm_start > start)
vma               663 arch/x86/mm/mpx.c 	while (vma && vma->vm_start < end) {
vma               670 arch/x86/mm/mpx.c 		if (!(vma->vm_flags & VM_MPX))
vma               673 arch/x86/mm/mpx.c 		len = min(vma->vm_end, end) - addr;
vma               674 arch/x86/mm/mpx.c 		zap_page_range(vma, addr, len);
vma               677 arch/x86/mm/mpx.c 		vma = vma->vm_next;
vma               678 arch/x86/mm/mpx.c 		addr = vma->vm_start;
vma               887 arch/x86/mm/mpx.c 	struct vm_area_struct *vma;
vma               906 arch/x86/mm/mpx.c 	vma = find_vma(mm, start);
vma               907 arch/x86/mm/mpx.c 	while (vma && vma->vm_start < end) {
vma               908 arch/x86/mm/mpx.c 		if (vma->vm_flags & VM_MPX)
vma               910 arch/x86/mm/mpx.c 		vma = vma->vm_next;
vma               965 arch/x86/mm/pat.c int track_pfn_copy(struct vm_area_struct *vma)
vma               969 arch/x86/mm/pat.c 	unsigned long vma_size = vma->vm_end - vma->vm_start;
vma               972 arch/x86/mm/pat.c 	if (vma->vm_flags & VM_PAT) {
vma               977 arch/x86/mm/pat.c 		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
vma               994 arch/x86/mm/pat.c int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
vma              1001 arch/x86/mm/pat.c 	if (!vma || (addr == vma->vm_start
vma              1002 arch/x86/mm/pat.c 				&& size == (vma->vm_end - vma->vm_start))) {
vma              1006 arch/x86/mm/pat.c 		if (ret == 0 && vma)
vma              1007 arch/x86/mm/pat.c 			vma->vm_flags |= VM_PAT;
vma              1034 arch/x86/mm/pat.c void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn)
vma              1052 arch/x86/mm/pat.c void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
vma              1058 arch/x86/mm/pat.c 	if (vma && !(vma->vm_flags & VM_PAT))
vma              1064 arch/x86/mm/pat.c 		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
vma              1069 arch/x86/mm/pat.c 		size = vma->vm_end - vma->vm_start;
vma              1072 arch/x86/mm/pat.c 	if (vma)
vma              1073 arch/x86/mm/pat.c 		vma->vm_flags &= ~VM_PAT;
vma              1081 arch/x86/mm/pat.c void untrack_pfn_moved(struct vm_area_struct *vma)
vma              1083 arch/x86/mm/pat.c 	vma->vm_flags &= ~VM_PAT;
vma               479 arch/x86/mm/pgtable.c int ptep_set_access_flags(struct vm_area_struct *vma,
vma               492 arch/x86/mm/pgtable.c int pmdp_set_access_flags(struct vm_area_struct *vma,
vma               513 arch/x86/mm/pgtable.c int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
vma               534 arch/x86/mm/pgtable.c int ptep_test_and_clear_young(struct vm_area_struct *vma,
vma               547 arch/x86/mm/pgtable.c int pmdp_test_and_clear_young(struct vm_area_struct *vma,
vma               558 arch/x86/mm/pgtable.c int pudp_test_and_clear_young(struct vm_area_struct *vma,
vma               571 arch/x86/mm/pgtable.c int ptep_clear_flush_young(struct vm_area_struct *vma,
vma               587 arch/x86/mm/pgtable.c 	return ptep_test_and_clear_young(vma, address, ptep);
vma               591 arch/x86/mm/pgtable.c int pmdp_clear_flush_young(struct vm_area_struct *vma,
vma               598 arch/x86/mm/pgtable.c 	young = pmdp_test_and_clear_young(vma, address, pmdp);
vma               600 arch/x86/mm/pgtable.c 		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
vma                63 arch/x86/mm/pkeys.c static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma)
vma                66 arch/x86/mm/pkeys.c 	if ((vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) != VM_EXEC)
vma                68 arch/x86/mm/pkeys.c 	if (vma_pkey(vma) != vma->vm_mm->context.execute_only_pkey)
vma                77 arch/x86/mm/pkeys.c int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey)
vma                93 arch/x86/mm/pkeys.c 		pkey = execute_only_pkey(vma->vm_mm);
vma                96 arch/x86/mm/pkeys.c 	} else if (vma_is_pkey_exec_only(vma)) {
vma               111 arch/x86/mm/pkeys.c 	return vma_pkey(vma);
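vma_pkey() in the x86 pkeys header above is a plain bitfield extract: the VM_PKEY_BIT* flags sit in vm_flags starting at VM_PKEY_SHIFT, so with pkey 5 the mapping carries 0x5 in those bits. A sketch with the mask spelled out (the real header builds it the same way, under CONFIG_ARCH_HAS_PKEYS):

	/* Extract the protection-key number stored in a vma's flags. */
	static int vma_pkey_sketch(struct vm_area_struct *vma)
	{
		unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
					      VM_PKEY_BIT2 | VM_PKEY_BIT3;

		return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
	}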
vma                44 arch/x86/um/mem_32.c 	struct vm_area_struct *vma = get_gate_vma(mm);
vma                46 arch/x86/um/mem_32.c 	if (!vma)
vma                49 arch/x86/um/mem_32.c 	return (addr >= vma->vm_start) && (addr < vma->vm_end);
vma                 5 arch/x86/um/mem_64.c const char *arch_vma_name(struct vm_area_struct *vma)
vma                 7 arch/x86/um/mem_64.c 	if (vma->vm_mm && vma->vm_start == um_vdso_addr)
vma                42 arch/x86/xen/mmu.c int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
vma                46 arch/x86/xen/mmu.c 		return xen_xlate_unmap_gfn_range(vma, nr, pages);
vma                20 arch/x86/xen/mmu.h pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
vma                21 arch/x86/xen/mmu.h void  xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
vma               309 arch/x86/xen/mmu_pv.c pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma,
vma               313 arch/x86/xen/mmu_pv.c 	trace_xen_mmu_ptep_modify_prot_start(vma->vm_mm, addr, ptep, *ptep);
vma               317 arch/x86/xen/mmu_pv.c void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
vma               322 arch/x86/xen/mmu_pv.c 	trace_xen_mmu_ptep_modify_prot_commit(vma->vm_mm, addr, ptep, pte);
vma              2715 arch/x86/xen/mmu_pv.c int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
vma              2725 arch/x86/xen/mmu_pv.c 	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
vma              2745 arch/x86/xen/mmu_pv.c 		err = apply_to_page_range(vma->vm_mm, addr, range,
vma               125 arch/xtensa/include/asm/cacheflush.h void local_flush_cache_range(struct vm_area_struct *vma,
vma               127 arch/xtensa/include/asm/cacheflush.h void local_flush_cache_page(struct vm_area_struct *vma,
vma               143 arch/xtensa/include/asm/cacheflush.h #define flush_cache_page(vma, addr, pfn)		do { } while (0)
vma               144 arch/xtensa/include/asm/cacheflush.h #define flush_cache_range(vma, start, end)		do { } while (0)
vma               156 arch/xtensa/include/asm/cacheflush.h #define	flush_icache_page(vma,page)			do { } while (0)
vma               170 arch/xtensa/include/asm/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
vma               177 arch/xtensa/include/asm/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vma               149 arch/xtensa/include/asm/page.h 			unsigned long vaddr, struct vm_area_struct *vma);
vma               341 arch/xtensa/include/asm/pgtable.h ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
vma               431 arch/xtensa/include/asm/pgtable.h extern  void update_mmu_cache(struct vm_area_struct * vma,
vma                35 arch/xtensa/include/asm/tlbflush.h void local_flush_tlb_page(struct vm_area_struct *vma,
vma                37 arch/xtensa/include/asm/tlbflush.h void local_flush_tlb_range(struct vm_area_struct *vma,
vma                54 arch/xtensa/include/asm/tlbflush.h #define flush_tlb_page(vma, page)	   local_flush_tlb_page(vma, page)
vma                55 arch/xtensa/include/asm/tlbflush.h #define flush_tlb_range(vma, vmaddr, end)  local_flush_tlb_range(vma, vmaddr, \
vma                74 arch/xtensa/kernel/pci.c int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
vma                85 arch/xtensa/kernel/pci.c 	vma->vm_pgoff += (ioaddr + pci_ctrl->io_space.start) >> PAGE_SHIFT;
vma               466 arch/xtensa/kernel/smp.c 	struct vm_area_struct *vma;
vma               494 arch/xtensa/kernel/smp.c 	local_flush_tlb_page(fd->vma, fd->addr1);
vma               497 arch/xtensa/kernel/smp.c void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
vma               500 arch/xtensa/kernel/smp.c 		.vma = vma,
vma               509 arch/xtensa/kernel/smp.c 	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
vma               512 arch/xtensa/kernel/smp.c void flush_tlb_range(struct vm_area_struct *vma,
vma               516 arch/xtensa/kernel/smp.c 		.vma = vma,
vma               553 arch/xtensa/kernel/smp.c 	local_flush_cache_page(fd->vma, fd->addr1, fd->addr2);
vma               556 arch/xtensa/kernel/smp.c void flush_cache_page(struct vm_area_struct *vma,
vma               560 arch/xtensa/kernel/smp.c 		.vma = vma,
vma               570 arch/xtensa/kernel/smp.c 	local_flush_cache_range(fd->vma, fd->addr1, fd->addr2);
vma               573 arch/xtensa/kernel/smp.c void flush_cache_range(struct vm_area_struct *vma,
vma               577 arch/xtensa/kernel/smp.c 		.vma = vma,
vma               106 arch/xtensa/mm/cache.c 			unsigned long vaddr, struct vm_area_struct *vma)
vma               180 arch/xtensa/mm/cache.c void local_flush_cache_range(struct vm_area_struct *vma,
vma               195 arch/xtensa/mm/cache.c void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
vma               211 arch/xtensa/mm/cache.c update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
vma               223 arch/xtensa/mm/cache.c 	flush_tlb_page(vma, addr);
vma               241 arch/xtensa/mm/cache.c 	    && (vma->vm_flags & VM_EXEC) != 0) {
vma               258 arch/xtensa/mm/cache.c void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
vma               285 arch/xtensa/mm/cache.c 		if ((vma->vm_flags & VM_EXEC) != 0)
vma               288 arch/xtensa/mm/cache.c 	} else if ((vma->vm_flags & VM_EXEC) != 0) {
vma               294 arch/xtensa/mm/cache.c extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
vma                38 arch/xtensa/mm/fault.c 	struct vm_area_struct * vma;
vma                78 arch/xtensa/mm/fault.c 	vma = find_vma(mm, address);
vma                80 arch/xtensa/mm/fault.c 	if (!vma)
vma                82 arch/xtensa/mm/fault.c 	if (vma->vm_start <= address)
vma                84 arch/xtensa/mm/fault.c 	if (!(vma->vm_flags & VM_GROWSDOWN))
vma                86 arch/xtensa/mm/fault.c 	if (expand_stack(vma, address))
vma                97 arch/xtensa/mm/fault.c 		if (!(vma->vm_flags & VM_WRITE))
vma               101 arch/xtensa/mm/fault.c 		if (!(vma->vm_flags & VM_EXEC))
vma               104 arch/xtensa/mm/fault.c 		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
vma               111 arch/xtensa/mm/fault.c 	fault = handle_mm_fault(vma, address, flags);
vma                88 arch/xtensa/mm/tlb.c void local_flush_tlb_range(struct vm_area_struct *vma,
vma                92 arch/xtensa/mm/tlb.c 	struct mm_struct *mm = vma->vm_mm;
vma               107 arch/xtensa/mm/tlb.c 		if (vma->vm_flags & VM_EXEC)
vma               126 arch/xtensa/mm/tlb.c void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
vma               129 arch/xtensa/mm/tlb.c 	struct mm_struct* mm = vma->vm_mm;
vma               141 arch/xtensa/mm/tlb.c 	if (vma->vm_flags & VM_EXEC)
vma              5133 drivers/android/binder.c static void binder_vma_open(struct vm_area_struct *vma)
vma              5135 drivers/android/binder.c 	struct binder_proc *proc = vma->vm_private_data;
vma              5139 drivers/android/binder.c 		     proc->pid, vma->vm_start, vma->vm_end,
vma              5140 drivers/android/binder.c 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
vma              5141 drivers/android/binder.c 		     (unsigned long)pgprot_val(vma->vm_page_prot));
vma              5144 drivers/android/binder.c static void binder_vma_close(struct vm_area_struct *vma)
vma              5146 drivers/android/binder.c 	struct binder_proc *proc = vma->vm_private_data;
vma              5150 drivers/android/binder.c 		     proc->pid, vma->vm_start, vma->vm_end,
vma              5151 drivers/android/binder.c 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
vma              5152 drivers/android/binder.c 		     (unsigned long)pgprot_val(vma->vm_page_prot));
vma              5167 drivers/android/binder.c static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
vma              5178 drivers/android/binder.c 		     __func__, proc->pid, vma->vm_start, vma->vm_end,
vma              5179 drivers/android/binder.c 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
vma              5180 drivers/android/binder.c 		     (unsigned long)pgprot_val(vma->vm_page_prot));
vma              5182 drivers/android/binder.c 	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
vma              5187 drivers/android/binder.c 	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
vma              5188 drivers/android/binder.c 	vma->vm_flags &= ~VM_MAYWRITE;
vma              5190 drivers/android/binder.c 	vma->vm_ops = &binder_vm_ops;
vma              5191 drivers/android/binder.c 	vma->vm_private_data = proc;
vma              5193 drivers/android/binder.c 	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
vma              5200 drivers/android/binder.c 	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
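binder_mmap() above is a textbook driver mmap: reject forbidden flag combinations, pin down flags the mapping may never gain, install vm_ops and private_data, and defer the real work to the allocator. A condensed sketch of those steps (binder's FORBIDDEN_MMAP_FLAGS is effectively a writable-map check; the ops table is the driver's own):

	#include <linux/fs.h>
	#include <linux/mm.h>

	static int driver_mmap_sketch(struct file *filp,
				      struct vm_area_struct *vma,
				      const struct vm_operations_struct *ops)
	{
		if (vma->vm_flags & VM_WRITE)	/* binder forbids writable maps */
			return -EPERM;

		vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
		vma->vm_flags &= ~VM_MAYWRITE;	/* mprotect can't add write later */
		vma->vm_ops = ops;
		vma->vm_private_data = filp->private_data;
		return 0;
	}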
vma               187 drivers/android/binder_alloc.c 	struct vm_area_struct *vma = NULL;
vma               216 drivers/android/binder_alloc.c 		vma = alloc->vma;
vma               219 drivers/android/binder_alloc.c 	if (!vma && need_mm) {
vma               244 drivers/android/binder_alloc.c 		if (WARN_ON(!vma))
vma               260 drivers/android/binder_alloc.c 		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
vma               310 drivers/android/binder_alloc.c 	return vma ? -ENOMEM : -ESRCH;
vma               315 drivers/android/binder_alloc.c 		struct vm_area_struct *vma)
vma               317 drivers/android/binder_alloc.c 	if (vma)
vma               318 drivers/android/binder_alloc.c 		alloc->vma_vm_mm = vma->vm_mm;
vma               326 drivers/android/binder_alloc.c 	alloc->vma = vma;
vma               332 drivers/android/binder_alloc.c 	struct vm_area_struct *vma = NULL;
vma               334 drivers/android/binder_alloc.c 	if (alloc->vma) {
vma               337 drivers/android/binder_alloc.c 		vma = alloc->vma;
vma               339 drivers/android/binder_alloc.c 	return vma;
vma               679 drivers/android/binder_alloc.c 			      struct vm_area_struct *vma)
vma               691 drivers/android/binder_alloc.c 	alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
vma               695 drivers/android/binder_alloc.c 	alloc->buffer = (void __user *)vma->vm_start;
vma               718 drivers/android/binder_alloc.c 	binder_alloc_set_vma(alloc, vma);
vma               734 drivers/android/binder_alloc.c 			   alloc->pid, vma->vm_start, vma->vm_end,
vma               748 drivers/android/binder_alloc.c 	BUG_ON(alloc->vma);
vma               921 drivers/android/binder_alloc.c 	struct vm_area_struct *vma;
vma               938 drivers/android/binder_alloc.c 	vma = binder_alloc_get_vma(alloc);
vma               943 drivers/android/binder_alloc.c 	if (vma) {
vma               946 drivers/android/binder_alloc.c 		zap_page_range(vma, page_addr, PAGE_SIZE);
vma                94 drivers/android/binder_alloc.h 	struct vm_area_struct *vma;
vma               130 drivers/android/binder_alloc.h 				     struct vm_area_struct *vma);
vma               290 drivers/android/binder_alloc_selftest.c 	if (!binder_selftest_run || !alloc->vma)
vma                53 drivers/auxdisplay/cfag12864bfb.c static int cfag12864bfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma                57 drivers/auxdisplay/cfag12864bfb.c 	return vm_map_pages_zero(vma, &pages, 1);
vma               223 drivers/auxdisplay/ht16k33.c static int ht16k33_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma               228 drivers/auxdisplay/ht16k33.c 	return vm_map_pages_zero(vma, &pages, 1);
vma                21 drivers/char/agp/alpha-agp.c 	dma_addr = vmf->address - vmf->vma->vm_start + agp->aperture.bus_base;
vma               565 drivers/char/agp/frontend.c static int agp_mmap(struct file *file, struct vm_area_struct *vma)
vma               582 drivers/char/agp/frontend.c 	size = vma->vm_end - vma->vm_start;
vma               585 drivers/char/agp/frontend.c 	offset = vma->vm_pgoff << PAGE_SHIFT;
vma               597 drivers/char/agp/frontend.c 		if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot))
vma               602 drivers/char/agp/frontend.c 			vma->vm_ops = kerninfo.vm_ops;
vma               603 drivers/char/agp/frontend.c 		} else if (io_remap_pfn_range(vma, vma->vm_start,
vma               606 drivers/char/agp/frontend.c 				pgprot_writecombine(vma->vm_page_prot))) {
vma               619 drivers/char/agp/frontend.c 			vma->vm_ops = kerninfo.vm_ops;
vma               620 drivers/char/agp/frontend.c 		} else if (io_remap_pfn_range(vma, vma->vm_start,
vma               623 drivers/char/agp/frontend.c 				pgprot_writecombine(vma->vm_page_prot))) {
vma               112 drivers/char/bsr.c static int bsr_mmap(struct file *filp, struct vm_area_struct *vma)
vma               114 drivers/char/bsr.c 	unsigned long size   = vma->vm_end - vma->vm_start;
vma               118 drivers/char/bsr.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               122 drivers/char/bsr.c 		ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12,
vma               123 drivers/char/bsr.c 				   vma->vm_page_prot);
vma               125 drivers/char/bsr.c 		ret = io_remap_pfn_range(vma, vma->vm_start,
vma               127 drivers/char/bsr.c 					 size, vma->vm_page_prot);
vma               379 drivers/char/hpet.c static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
vma               393 drivers/char/hpet.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               394 drivers/char/hpet.c 	return vm_iomap_memory(vma, addr, PAGE_SIZE);
vma               397 drivers/char/hpet.c static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
vma               351 drivers/char/mem.c static inline int private_mapping_ok(struct vm_area_struct *vma)
vma               353 drivers/char/mem.c 	return vma->vm_flags & VM_MAYSHARE;
vma               357 drivers/char/mem.c static inline int private_mapping_ok(struct vm_area_struct *vma)
vma               369 drivers/char/mem.c static int mmap_mem(struct file *file, struct vm_area_struct *vma)
vma               371 drivers/char/mem.c 	size_t size = vma->vm_end - vma->vm_start;
vma               372 drivers/char/mem.c 	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
vma               375 drivers/char/mem.c 	if (offset >> PAGE_SHIFT != vma->vm_pgoff)
vma               382 drivers/char/mem.c 	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
vma               385 drivers/char/mem.c 	if (!private_mapping_ok(vma))
vma               388 drivers/char/mem.c 	if (!range_is_allowed(vma->vm_pgoff, size))
vma               391 drivers/char/mem.c 	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
vma               392 drivers/char/mem.c 						&vma->vm_page_prot))
vma               395 drivers/char/mem.c 	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
vma               397 drivers/char/mem.c 						 vma->vm_page_prot);
vma               399 drivers/char/mem.c 	vma->vm_ops = &mmap_mem_ops;
vma               402 drivers/char/mem.c 	if (remap_pfn_range(vma,
vma               403 drivers/char/mem.c 			    vma->vm_start,
vma               404 drivers/char/mem.c 			    vma->vm_pgoff,
vma               406 drivers/char/mem.c 			    vma->vm_page_prot)) {
vma               412 drivers/char/mem.c static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
vma               417 drivers/char/mem.c 	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
vma               429 drivers/char/mem.c 	vma->vm_pgoff = pfn;
vma               430 drivers/char/mem.c 	return mmap_mem(file, vma);
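[note] The mmap_mem() hits above show the canonical checks in front of remap_pfn_range(): pgoff-to-phys overflow, then policy hooks, then the mapping. A condensed sketch with the *_allowed() policy hooks elided:

	#include <linux/mm.h>

	static int example_mmap_phys(struct file *file, struct vm_area_struct *vma)
	{
		size_t size = vma->vm_end - vma->vm_start;
		phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

		/* Reject vm_pgoff values whose byte offset overflows. */
		if (offset >> PAGE_SHIFT != vma->vm_pgoff)
			return -EINVAL;

		if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				    size, vma->vm_page_prot))
			return -EAGAIN;
		return 0;
	}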
vma               725 drivers/char/mem.c static int mmap_zero(struct file *file, struct vm_area_struct *vma)
vma               730 drivers/char/mem.c 	if (vma->vm_flags & VM_SHARED)
vma               731 drivers/char/mem.c 		return shmem_zero_setup(vma);
vma               732 drivers/char/mem.c 	vma_set_anonymous(vma);
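[note] mmap_zero() above distinguishes shared from private mappings: shared ones need real shmem backing so writes are visible to other mappers, private ones can stay anonymous and be served by the zero page. Sketch of just that branch:

	static int example_mmap_zero(struct file *file, struct vm_area_struct *vma)
	{
		if (vma->vm_flags & VM_SHARED)
			return shmem_zero_setup(vma);	/* attach shmem backing */

		vma_set_anonymous(vma);			/* plain anon mapping */
		return 0;
	}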
vma                89 drivers/char/mspec.c mspec_open(struct vm_area_struct *vma)
vma                93 drivers/char/mspec.c 	vdata = vma->vm_private_data;
vma               104 drivers/char/mspec.c mspec_close(struct vm_area_struct *vma)
vma               110 drivers/char/mspec.c 	vdata = vma->vm_private_data;
vma               143 drivers/char/mspec.c 	struct vma_data *vdata = vmf->vma->vm_private_data;
vma               165 drivers/char/mspec.c 	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
vma               182 drivers/char/mspec.c mspec_mmap(struct file *file, struct vm_area_struct *vma,
vma               188 drivers/char/mspec.c 	if (vma->vm_pgoff != 0)
vma               191 drivers/char/mspec.c 	if ((vma->vm_flags & VM_SHARED) == 0)
vma               194 drivers/char/mspec.c 	if ((vma->vm_flags & VM_WRITE) == 0)
vma               197 drivers/char/mspec.c 	pages = vma_pages(vma);
vma               206 drivers/char/mspec.c 	vdata->vm_start = vma->vm_start;
vma               207 drivers/char/mspec.c 	vdata->vm_end = vma->vm_end;
vma               211 drivers/char/mspec.c 	vma->vm_private_data = vdata;
vma               213 drivers/char/mspec.c 	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma               215 drivers/char/mspec.c 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               216 drivers/char/mspec.c 	vma->vm_ops = &mspec_vm_ops;
vma               222 drivers/char/mspec.c cached_mmap(struct file *file, struct vm_area_struct *vma)
vma               224 drivers/char/mspec.c 	return mspec_mmap(file, vma, MSPEC_CACHED);
vma               228 drivers/char/mspec.c uncached_mmap(struct file *file, struct vm_area_struct *vma)
vma               230 drivers/char/mspec.c 	return mspec_mmap(file, vma, MSPEC_UNCACHED);
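[note] mspec above hangs per-mapping state off vm_private_data and uses the vm_operations_struct open/close hooks as its reference-count points, so the state survives fork() and VMA splits. A sketch of the idiom; struct example_vdata and its refcount_t are hypothetical (mspec itself uses an atomic_t):

	#include <linux/refcount.h>
	#include <linux/slab.h>

	static void example_vm_open(struct vm_area_struct *vma)
	{
		struct example_vdata *vdata = vma->vm_private_data;

		refcount_inc(&vdata->refcnt);	/* another VMA shares this state */
	}

	static void example_vm_close(struct vm_area_struct *vma)
	{
		struct example_vdata *vdata = vma->vm_private_data;

		if (refcount_dec_and_test(&vdata->refcnt))
			kfree(vdata);		/* last mapping went away */
	}

	static const struct vm_operations_struct example_vm_ops = {
		.open  = example_vm_open,
		.close = example_vm_close,
	};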
vma                43 drivers/char/uv_mmtimer.c static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma);
vma               147 drivers/char/uv_mmtimer.c static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma)
vma               151 drivers/char/uv_mmtimer.c 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
vma               154 drivers/char/uv_mmtimer.c 	if (vma->vm_flags & VM_WRITE)
vma               160 drivers/char/uv_mmtimer.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               166 drivers/char/uv_mmtimer.c 	if (remap_pfn_range(vma, vma->vm_start, uv_mmtimer_addr >> PAGE_SHIFT,
vma               167 drivers/char/uv_mmtimer.c 					PAGE_SIZE, vma->vm_page_prot)) {
vma                17 drivers/dax/device.c static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
vma                28 drivers/dax/device.c 	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
vma                36 drivers/dax/device.c 	if (vma->vm_start & mask || vma->vm_end & mask) {
vma                39 drivers/dax/device.c 				current->comm, func, vma->vm_start, vma->vm_end,
vma                45 drivers/dax/device.c 			&& (vma->vm_flags & VM_DONTCOPY) == 0) {
vma                52 drivers/dax/device.c 	if (!vma_is_dax(vma)) {
vma                86 drivers/dax/device.c 	if (check_vma(dev_dax, vmf->vma, __func__))
vma               107 drivers/dax/device.c 	return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
vma               120 drivers/dax/device.c 	if (check_vma(dev_dax, vmf->vma, __func__))
vma               142 drivers/dax/device.c 	if (pmd_addr < vmf->vma->vm_start ||
vma               143 drivers/dax/device.c 			(pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
vma               146 drivers/dax/device.c 	pgoff = linear_page_index(vmf->vma, pmd_addr);
vma               170 drivers/dax/device.c 	if (check_vma(dev_dax, vmf->vma, __func__))
vma               192 drivers/dax/device.c 	if (pud_addr < vmf->vma->vm_start ||
vma               193 drivers/dax/device.c 			(pud_addr + PUD_SIZE) > vmf->vma->vm_end)
vma               196 drivers/dax/device.c 	pgoff = linear_page_index(vmf->vma, pud_addr);
vma               218 drivers/dax/device.c 	struct file *filp = vmf->vma->vm_file;
vma               227 drivers/dax/device.c 			vmf->vma->vm_start, vmf->vma->vm_end, pe_size);
vma               257 drivers/dax/device.c 		pgoff = linear_page_index(vmf->vma, vmf->address
vma               279 drivers/dax/device.c static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
vma               281 drivers/dax/device.c 	struct file *filp = vma->vm_file;
vma               290 drivers/dax/device.c static unsigned long dev_dax_pagesize(struct vm_area_struct *vma)
vma               292 drivers/dax/device.c 	struct file *filp = vma->vm_file;
vma               306 drivers/dax/device.c static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
vma               318 drivers/dax/device.c 	rc = check_vma(dev_dax, vma, __func__);
vma               323 drivers/dax/device.c 	vma->vm_ops = &dax_vm_ops;
vma               324 drivers/dax/device.c 	vma->vm_flags |= VM_HUGEPAGE;
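[note] check_vma() above enforces the dax invariants (shared mapping, ends aligned to the fault granularity) before the driver accepts an mmap or services a fault. The same shape works for any driver with alignment requirements; a sketch with the alignment passed in:

	static int example_check_vma(struct vm_area_struct *vma, unsigned long align)
	{
		unsigned long mask = align - 1;

		if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE)
			return -EINVAL;	/* must be a shareable mapping */

		if ((vma->vm_start & mask) || (vma->vm_end & mask))
			return -EINVAL;	/* ends must sit on the fault granularity */

		return 0;
	}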
vma               116 drivers/dma-buf/dma-buf.c static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
vma               130 drivers/dma-buf/dma-buf.c 	if (vma->vm_pgoff + vma_pages(vma) >
vma               134 drivers/dma-buf/dma-buf.c 	return dmabuf->ops->mmap(dmabuf, vma);
vma              1040 drivers/dma-buf/dma-buf.c int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
vma              1046 drivers/dma-buf/dma-buf.c 	if (WARN_ON(!dmabuf || !vma))
vma              1054 drivers/dma-buf/dma-buf.c 	if (pgoff + vma_pages(vma) < pgoff)
vma              1058 drivers/dma-buf/dma-buf.c 	if (pgoff + vma_pages(vma) >
vma              1064 drivers/dma-buf/dma-buf.c 	oldfile = vma->vm_file;
vma              1065 drivers/dma-buf/dma-buf.c 	vma->vm_file = dmabuf->file;
vma              1066 drivers/dma-buf/dma-buf.c 	vma->vm_pgoff = pgoff;
vma              1068 drivers/dma-buf/dma-buf.c 	ret = dmabuf->ops->mmap(dmabuf, vma);
vma              1071 drivers/dma-buf/dma-buf.c 		vma->vm_file = oldfile;
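[note] dma_buf_mmap() above redirects an importer's VMA at the exporter: it re-points vm_file and vm_pgoff at the dma-buf before calling the exporter's mmap op, and rolls both back if that op fails. Condensed sketch of that sequence (error paths before this point elided):

	static int example_redirect_mmap(struct dma_buf *dmabuf,
					 struct vm_area_struct *vma,
					 unsigned long pgoff)
	{
		struct file *oldfile = vma->vm_file;
		int ret;

		get_file(dmabuf->file);		/* the VMA now holds a reference */
		vma->vm_file = dmabuf->file;
		vma->vm_pgoff = pgoff;

		ret = dmabuf->ops->mmap(dmabuf, vma);
		if (ret) {			/* exporter refused: roll back */
			vma->vm_file = oldfile;
			fput(dmabuf->file);
		} else if (oldfile) {
			fput(oldfile);		/* drop the importer's file ref */
		}
		return ret;
	}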
vma                25 drivers/dma-buf/udmabuf.c 	struct vm_area_struct *vma = vmf->vma;
vma                26 drivers/dma-buf/udmabuf.c 	struct udmabuf *ubuf = vma->vm_private_data;
vma                37 drivers/dma-buf/udmabuf.c static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
vma                41 drivers/dma-buf/udmabuf.c 	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
vma                44 drivers/dma-buf/udmabuf.c 	vma->vm_ops = &udmabuf_vm_ops;
vma                45 drivers/dma-buf/udmabuf.c 	vma->vm_private_data = ubuf;
vma              1657 drivers/firewire/core-cdev.c static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
vma              1670 drivers/firewire/core-cdev.c 	if (!(vma->vm_flags & VM_SHARED))
vma              1673 drivers/firewire/core-cdev.c 	if (vma->vm_start & ~PAGE_MASK)
vma              1676 drivers/firewire/core-cdev.c 	client->vm_start = vma->vm_start;
vma              1677 drivers/firewire/core-cdev.c 	size = vma->vm_end - vma->vm_start;
vma              1697 drivers/firewire/core-cdev.c 	ret = fw_iso_buffer_map_vma(&client->buffer, vma);
vma                95 drivers/firewire/core-iso.c 			  struct vm_area_struct *vma)
vma                97 drivers/firewire/core-iso.c 	return vm_map_pages_zero(vma, buffer->pages,
vma               162 drivers/firewire/core.h 			  struct vm_area_struct *vma);
vma               749 drivers/fpga/dfl-afu-main.c static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
vma               753 drivers/fpga/dfl-afu-main.c 	u64 size = vma->vm_end - vma->vm_start;
vma               758 drivers/fpga/dfl-afu-main.c 	if (!(vma->vm_flags & VM_SHARED))
vma               763 drivers/fpga/dfl-afu-main.c 	offset = vma->vm_pgoff << PAGE_SHIFT;
vma               771 drivers/fpga/dfl-afu-main.c 	if ((vma->vm_flags & VM_READ) && !(region.flags & DFL_PORT_REGION_READ))
vma               774 drivers/fpga/dfl-afu-main.c 	if ((vma->vm_flags & VM_WRITE) &&
vma               778 drivers/fpga/dfl-afu-main.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               780 drivers/fpga/dfl-afu-main.c 	return remap_pfn_range(vma, vma->vm_start,
vma               782 drivers/fpga/dfl-afu-main.c 			size, vma->vm_page_prot);
vma               105 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c 			  struct vm_area_struct *vma)
vma               112 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c 	if (!vma->vm_file)
vma               119 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c 	if (asize < vma->vm_end - vma->vm_start)
vma               126 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c 	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;
vma               129 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c 	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
vma               133 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c 	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
vma               134 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c 	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);
vma                40 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h 			  struct vm_area_struct *vma);
vma              1121 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 			     struct vm_area_struct *vma)
vma              1123 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 	return ttm_fbdev_mmap(vma, &bo->tbo);
vma               259 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h 				struct vm_area_struct *vma);
vma               794 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct vm_area_struct *vma;
vma               840 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	vma = find_vma(mm, start);
vma               841 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	if (unlikely(!vma || start < vma->vm_start)) {
vma               846 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		vma->vm_file)) {
vma              1886 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
vma              1894 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
vma               102 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
vma              1865 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		      struct vm_area_struct *vma)
vma              1870 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
vma              1875 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
vma              1878 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma              1885 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		 process->pasid, (unsigned long long) vma->vm_start,
vma              1886 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		 address, vma->vm_flags, PAGE_SIZE);
vma              1888 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	ret = io_remap_pfn_range(vma,
vma              1889 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 				vma->vm_start,
vma              1892 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 				vma->vm_page_prot);
vma              1897 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
vma              1908 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	vm_pgoff = vma->vm_pgoff;
vma              1909 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	vma->vm_pgoff = KFD_MMAP_OFFSET_VALUE_GET(vm_pgoff);
vma              1918 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		return kfd_doorbell_mmap(dev, process, vma);
vma              1921 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		return kfd_event_mmap(process, vma);
vma              1926 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		return kfd_reserved_mem_mmap(dev, process, vma);
vma              1930 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		return kfd_mmio_mmap(dev, process, vma);
vma               130 drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c 		      struct vm_area_struct *vma)
vma               138 drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c 	if (vma->vm_end - vma->vm_start != kfd_doorbell_process_slice(dev))
vma               144 drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c 	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
vma               147 drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               154 drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c 		 (unsigned long long) vma->vm_start, address, vma->vm_flags,
vma               158 drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c 	return io_remap_pfn_range(vma,
vma               159 drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c 				vma->vm_start,
vma               162 drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c 				vma->vm_page_prot);
vma               782 drivers/gpu/drm/amd/amdkfd/kfd_events.c int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
vma               790 drivers/gpu/drm/amd/amdkfd/kfd_events.c 			get_order(vma->vm_end - vma->vm_start)) {
vma               805 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
vma               809 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	pr_debug("     start user address  == 0x%08lx\n", vma->vm_start);
vma               810 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	pr_debug("     end user address    == 0x%08lx\n", vma->vm_end);
vma               812 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	pr_debug("     vm_flags            == 0x%08lX\n", vma->vm_flags);
vma               814 drivers/gpu/drm/amd/amdkfd/kfd_events.c 			vma->vm_end - vma->vm_start);
vma               816 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	page->user_address = (uint64_t __user *)vma->vm_start;
vma               819 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	ret = remap_pfn_range(vma, vma->vm_start, pfn,
vma               820 drivers/gpu/drm/amd/amdkfd/kfd_events.c 			vma->vm_end - vma->vm_start, vma->vm_page_prot);
vma               822 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		p->signal_mapped_size = vma->vm_end - vma->vm_start;
vma               881 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	struct vm_area_struct *vma;
vma               906 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	vma = find_vma(mm, address);
vma               914 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	if (vma && address >= vma->vm_start) {
vma               917 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		if (is_write_requested && !(vma->vm_flags & VM_WRITE))
vma               922 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
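[note] The kfd_events fault path above walks the faulting task's address space with find_vma() and compares the requested access against vm_flags. A sketch of that check; the caller must hold the mm's mmap_sem for read, as the kfd code does:

	#include <linux/mm.h>

	static bool example_access_ok(struct mm_struct *mm, unsigned long address,
				      bool write)
	{
		struct vm_area_struct *vma = find_vma(mm, address);

		/* find_vma() returns the first VMA ending above address;
		 * the address is only covered if it is also >= vm_start. */
		if (!vma || address < vma->vm_start)
			return false;

		if (write && !(vma->vm_flags & VM_WRITE))
			return false;

		return true;
	}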
vma               782 drivers/gpu/drm/amd/amdkfd/kfd_priv.h 			  struct vm_area_struct *vma);
vma               813 drivers/gpu/drm/amd/amdkfd/kfd_priv.h 		      struct vm_area_struct *vma);
vma              1007 drivers/gpu/drm/amd/amdkfd/kfd_priv.h int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
vma              1117 drivers/gpu/drm/amd/amdkfd/kfd_process.c 			  struct vm_area_struct *vma)
vma              1122 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
vma              1139 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
vma              1142 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	return remap_pfn_range(vma, vma->vm_start,
vma              1144 drivers/gpu/drm/amd/amdkfd/kfd_process.c 			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
vma                20 drivers/gpu/drm/armada/armada_gem.c 	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
vma                24 drivers/gpu/drm/armada/armada_gem.c 	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
vma                25 drivers/gpu/drm/armada/armada_gem.c 	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
vma               475 drivers/gpu/drm/armada/armada_gem.c armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
vma               222 drivers/gpu/drm/cirrus/cirrus_drv.h int cirrus_mmap(struct file *filp, struct vm_area_struct *vma);
vma              2157 drivers/gpu/drm/drm_fb_helper.c static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma              2162 drivers/gpu/drm/drm_fb_helper.c 		return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma);
vma              1051 drivers/gpu/drm/drm_gem.c void drm_gem_vm_open(struct vm_area_struct *vma)
vma              1053 drivers/gpu/drm/drm_gem.c 	struct drm_gem_object *obj = vma->vm_private_data;
vma              1066 drivers/gpu/drm/drm_gem.c void drm_gem_vm_close(struct vm_area_struct *vma)
vma              1068 drivers/gpu/drm/drm_gem.c 	struct drm_gem_object *obj = vma->vm_private_data;
vma              1099 drivers/gpu/drm/drm_gem.c 		     struct vm_area_struct *vma)
vma              1104 drivers/gpu/drm/drm_gem.c 	if (obj_size < vma->vm_end - vma->vm_start)
vma              1108 drivers/gpu/drm/drm_gem.c 		vma->vm_ops = obj->funcs->vm_ops;
vma              1110 drivers/gpu/drm/drm_gem.c 		vma->vm_ops = dev->driver->gem_vm_ops;
vma              1114 drivers/gpu/drm/drm_gem.c 	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma              1115 drivers/gpu/drm/drm_gem.c 	vma->vm_private_data = obj;
vma              1116 drivers/gpu/drm/drm_gem.c 	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma              1117 drivers/gpu/drm/drm_gem.c 	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
vma              1146 drivers/gpu/drm/drm_gem.c int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
vma              1159 drivers/gpu/drm/drm_gem.c 						  vma->vm_pgoff,
vma              1160 drivers/gpu/drm/drm_gem.c 						  vma_pages(vma));
vma              1187 drivers/gpu/drm/drm_gem.c 		if (vma->vm_flags & VM_WRITE) {
vma              1192 drivers/gpu/drm/drm_gem.c 		vma->vm_flags &= ~VM_MAYWRITE;
vma              1196 drivers/gpu/drm/drm_gem.c 			       vma);
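[note] drm_gem_mmap_obj() above is the template for GEM mmap setup: size check, mark the VMA as a don't-copy/don't-dump PFN mapping, stash the object in vm_private_data for the fault handler, and take an object reference that drm_gem_vm_close() later drops. Condensed sketch (vm_ops wiring elided):

	static int example_gem_vma_setup(struct drm_gem_object *obj,
					 struct vm_area_struct *vma)
	{
		if (obj->size < vma->vm_end - vma->vm_start)
			return -EINVAL;		/* request larger than object */

		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_private_data = obj;	/* fault handler finds the object here */
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

		drm_gem_object_get(obj);	/* released in drm_gem_vm_close() */
		return 0;
	}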
vma               270 drivers/gpu/drm/drm_gem_cma_helper.c 				struct vm_area_struct *vma)
vma               279 drivers/gpu/drm/drm_gem_cma_helper.c 	vma->vm_flags &= ~VM_PFNMAP;
vma               280 drivers/gpu/drm/drm_gem_cma_helper.c 	vma->vm_pgoff = 0;
vma               282 drivers/gpu/drm/drm_gem_cma_helper.c 	ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
vma               283 drivers/gpu/drm/drm_gem_cma_helper.c 			  cma_obj->paddr, vma->vm_end - vma->vm_start);
vma               285 drivers/gpu/drm/drm_gem_cma_helper.c 		drm_gem_vm_close(vma);
vma               308 drivers/gpu/drm/drm_gem_cma_helper.c int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
vma               314 drivers/gpu/drm/drm_gem_cma_helper.c 	ret = drm_gem_mmap(filp, vma);
vma               318 drivers/gpu/drm/drm_gem_cma_helper.c 	gem_obj = vma->vm_private_data;
vma               321 drivers/gpu/drm/drm_gem_cma_helper.c 	return drm_gem_cma_mmap_obj(cma_obj, vma);
vma               522 drivers/gpu/drm/drm_gem_cma_helper.c 			   struct vm_area_struct *vma)
vma               527 drivers/gpu/drm/drm_gem_cma_helper.c 	ret = drm_gem_mmap_obj(obj, obj->size, vma);
vma               532 drivers/gpu/drm/drm_gem_cma_helper.c 	return drm_gem_cma_mmap_obj(cma_obj, vma);
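[note] The CMA helper above clears VM_PFNMAP and resets vm_pgoff before handing the whole buffer to dma_mmap_wc(), which builds a write-combined mapping of the coherent allocation. Sketch using the same cma_obj fields as the helper:

	static int example_cma_mmap(struct drm_gem_cma_object *cma_obj,
				    struct vm_area_struct *vma)
	{
		/* dma_mmap_wc() interprets vm_pgoff as an offset into the
		 * buffer, so drop the fake GEM mmap offset first. */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		return dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
				   cma_obj->paddr, vma->vm_end - vma->vm_start);
	}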
vma               473 drivers/gpu/drm/drm_gem_shmem_helper.c 	struct vm_area_struct *vma = vmf->vma;
vma               474 drivers/gpu/drm/drm_gem_shmem_helper.c 	struct drm_gem_object *obj = vma->vm_private_data;
vma               484 drivers/gpu/drm/drm_gem_shmem_helper.c 	return vmf_insert_page(vma, vmf->address, page);
vma               487 drivers/gpu/drm/drm_gem_shmem_helper.c static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
vma               489 drivers/gpu/drm/drm_gem_shmem_helper.c 	struct drm_gem_object *obj = vma->vm_private_data;
vma               496 drivers/gpu/drm/drm_gem_shmem_helper.c 	drm_gem_vm_open(vma);
vma               499 drivers/gpu/drm/drm_gem_shmem_helper.c static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
vma               501 drivers/gpu/drm/drm_gem_shmem_helper.c 	struct drm_gem_object *obj = vma->vm_private_data;
vma               505 drivers/gpu/drm/drm_gem_shmem_helper.c 	drm_gem_vm_close(vma);
vma               531 drivers/gpu/drm/drm_gem_shmem_helper.c int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
vma               536 drivers/gpu/drm/drm_gem_shmem_helper.c 	ret = drm_gem_mmap(filp, vma);
vma               540 drivers/gpu/drm/drm_gem_shmem_helper.c 	shmem = to_drm_gem_shmem_obj(vma->vm_private_data);
vma               544 drivers/gpu/drm/drm_gem_shmem_helper.c 		drm_gem_vm_close(vma);
vma               549 drivers/gpu/drm/drm_gem_shmem_helper.c 	vma->vm_flags &= ~VM_PFNMAP;
vma               550 drivers/gpu/drm/drm_gem_shmem_helper.c 	vma->vm_flags |= VM_MIXEDMAP;
vma               553 drivers/gpu/drm/drm_gem_shmem_helper.c 	vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node);
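[note] The shmem helper's fault path above resolves the faulting page from the object's page array and installs it with vmf_insert_page(); this only works because mmap switched the VMA from VM_PFNMAP to VM_MIXEDMAP first. Sketch with an explicit bounds check added:

	static vm_fault_t example_shmem_fault(struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;
		struct drm_gem_shmem_object *shmem =
			to_drm_gem_shmem_obj(vma->vm_private_data);
		loff_t num_pages = DIV_ROUND_UP(shmem->base.size, PAGE_SIZE);

		if (vmf->pgoff >= num_pages)
			return VM_FAULT_SIGBUS;	/* fault beyond the object */

		return vmf_insert_page(vma, vmf->address,
				       shmem->pages[vmf->pgoff]);
	}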
vma               710 drivers/gpu/drm/drm_prime.c int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
vma               731 drivers/gpu/drm/drm_prime.c 	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
vma               733 drivers/gpu/drm/drm_prime.c 	ret = obj->dev->driver->fops->mmap(fil, vma);
vma               758 drivers/gpu/drm/drm_prime.c int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
vma               766 drivers/gpu/drm/drm_prime.c 	return dev->driver->gem_prime_mmap(obj, vma);
vma                62 drivers/gpu/drm/drm_vm.c 	struct vm_area_struct *vma;
vma                66 drivers/gpu/drm/drm_vm.c static void drm_vm_open(struct vm_area_struct *vma);
vma                67 drivers/gpu/drm/drm_vm.c static void drm_vm_close(struct vm_area_struct *vma);
vma                70 drivers/gpu/drm/drm_vm.c 			    struct vm_area_struct *vma)
vma                72 drivers/gpu/drm/drm_vm.c 	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
vma                84 drivers/gpu/drm/drm_vm.c 	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
vma                85 drivers/gpu/drm/drm_vm.c 				    vma->vm_start))
vma                95 drivers/gpu/drm/drm_vm.c static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
vma                97 drivers/gpu/drm/drm_vm.c 	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
vma               118 drivers/gpu/drm/drm_vm.c 	struct vm_area_struct *vma = vmf->vma;
vma               119 drivers/gpu/drm/drm_vm.c 	struct drm_file *priv = vma->vm_file->private_data;
vma               134 drivers/gpu/drm/drm_vm.c 	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
vma               145 drivers/gpu/drm/drm_vm.c 		resource_size_t offset = vmf->address - vma->vm_start;
vma               207 drivers/gpu/drm/drm_vm.c 	struct vm_area_struct *vma = vmf->vma;
vma               208 drivers/gpu/drm/drm_vm.c 	struct drm_local_map *map = vma->vm_private_data;
vma               216 drivers/gpu/drm/drm_vm.c 	offset = vmf->address - vma->vm_start;
vma               236 drivers/gpu/drm/drm_vm.c static void drm_vm_shm_close(struct vm_area_struct *vma)
vma               238 drivers/gpu/drm/drm_vm.c 	struct drm_file *priv = vma->vm_file->private_data;
vma               246 drivers/gpu/drm/drm_vm.c 		  vma->vm_start, vma->vm_end - vma->vm_start);
vma               248 drivers/gpu/drm/drm_vm.c 	map = vma->vm_private_data;
vma               252 drivers/gpu/drm/drm_vm.c 		if (pt->vma->vm_private_data == map)
vma               254 drivers/gpu/drm/drm_vm.c 		if (pt->vma == vma) {
vma               309 drivers/gpu/drm/drm_vm.c 	struct vm_area_struct *vma = vmf->vma;
vma               310 drivers/gpu/drm/drm_vm.c 	struct drm_file *priv = vma->vm_file->private_data;
vma               322 drivers/gpu/drm/drm_vm.c 	offset = vmf->address - vma->vm_start;
vma               344 drivers/gpu/drm/drm_vm.c 	struct vm_area_struct *vma = vmf->vma;
vma               345 drivers/gpu/drm/drm_vm.c 	struct drm_local_map *map = vma->vm_private_data;
vma               346 drivers/gpu/drm/drm_vm.c 	struct drm_file *priv = vma->vm_file->private_data;
vma               359 drivers/gpu/drm/drm_vm.c 	offset = vmf->address - vma->vm_start;
vma               398 drivers/gpu/drm/drm_vm.c 			       struct vm_area_struct *vma)
vma               403 drivers/gpu/drm/drm_vm.c 		  vma->vm_start, vma->vm_end - vma->vm_start);
vma               407 drivers/gpu/drm/drm_vm.c 		vma_entry->vma = vma;
vma               413 drivers/gpu/drm/drm_vm.c static void drm_vm_open(struct vm_area_struct *vma)
vma               415 drivers/gpu/drm/drm_vm.c 	struct drm_file *priv = vma->vm_file->private_data;
vma               419 drivers/gpu/drm/drm_vm.c 	drm_vm_open_locked(dev, vma);
vma               424 drivers/gpu/drm/drm_vm.c 				struct vm_area_struct *vma)
vma               429 drivers/gpu/drm/drm_vm.c 		  vma->vm_start, vma->vm_end - vma->vm_start);
vma               432 drivers/gpu/drm/drm_vm.c 		if (pt->vma == vma) {
vma               448 drivers/gpu/drm/drm_vm.c static void drm_vm_close(struct vm_area_struct *vma)
vma               450 drivers/gpu/drm/drm_vm.c 	struct drm_file *priv = vma->vm_file->private_data;
vma               454 drivers/gpu/drm/drm_vm.c 	drm_vm_close_locked(dev, vma);
vma               468 drivers/gpu/drm/drm_vm.c static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
vma               473 drivers/gpu/drm/drm_vm.c 	unsigned long length = vma->vm_end - vma->vm_start;
vma               478 drivers/gpu/drm/drm_vm.c 		  vma->vm_start, vma->vm_end, vma->vm_pgoff);
vma               487 drivers/gpu/drm/drm_vm.c 		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
vma               489 drivers/gpu/drm/drm_vm.c 		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
vma               494 drivers/gpu/drm/drm_vm.c 		vma->vm_page_prot =
vma               497 drivers/gpu/drm/drm_vm.c 			      (__pte(pgprot_val(vma->vm_page_prot)))));
vma               501 drivers/gpu/drm/drm_vm.c 	vma->vm_ops = &drm_vm_dma_ops;
vma               503 drivers/gpu/drm/drm_vm.c 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma               505 drivers/gpu/drm/drm_vm.c 	drm_vm_open_locked(dev, vma);
vma               531 drivers/gpu/drm/drm_vm.c static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma               540 drivers/gpu/drm/drm_vm.c 		  vma->vm_start, vma->vm_end, vma->vm_pgoff);
vma               549 drivers/gpu/drm/drm_vm.c 	if (!vma->vm_pgoff
vma               555 drivers/gpu/drm/drm_vm.c 		return drm_mmap_dma(filp, vma);
vma               557 drivers/gpu/drm/drm_vm.c 	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
vma               567 drivers/gpu/drm/drm_vm.c 	if (map->size < vma->vm_end - vma->vm_start)
vma               571 drivers/gpu/drm/drm_vm.c 		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
vma               573 drivers/gpu/drm/drm_vm.c 		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
vma               578 drivers/gpu/drm/drm_vm.c 		vma->vm_page_prot =
vma               581 drivers/gpu/drm/drm_vm.c 			      (__pte(pgprot_val(vma->vm_page_prot)))));
vma               595 drivers/gpu/drm/drm_vm.c 			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               597 drivers/gpu/drm/drm_vm.c 			vma->vm_ops = &drm_vm_ops;
vma               605 drivers/gpu/drm/drm_vm.c 		vma->vm_page_prot = drm_io_prot(map, vma);
vma               606 drivers/gpu/drm/drm_vm.c 		if (io_remap_pfn_range(vma, vma->vm_start,
vma               608 drivers/gpu/drm/drm_vm.c 				       vma->vm_end - vma->vm_start,
vma               609 drivers/gpu/drm/drm_vm.c 				       vma->vm_page_prot))
vma               614 drivers/gpu/drm/drm_vm.c 			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
vma               616 drivers/gpu/drm/drm_vm.c 		vma->vm_ops = &drm_vm_ops;
vma               621 drivers/gpu/drm/drm_vm.c 		if (remap_pfn_range(vma, vma->vm_start,
vma               623 drivers/gpu/drm/drm_vm.c 		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
vma               625 drivers/gpu/drm/drm_vm.c 		vma->vm_page_prot = drm_dma_prot(map->type, vma);
vma               628 drivers/gpu/drm/drm_vm.c 		vma->vm_ops = &drm_vm_shm_ops;
vma               629 drivers/gpu/drm/drm_vm.c 		vma->vm_private_data = (void *)map;
vma               632 drivers/gpu/drm/drm_vm.c 		vma->vm_ops = &drm_vm_sg_ops;
vma               633 drivers/gpu/drm/drm_vm.c 		vma->vm_private_data = (void *)map;
vma               634 drivers/gpu/drm/drm_vm.c 		vma->vm_page_prot = drm_dma_prot(map->type, vma);
vma               639 drivers/gpu/drm/drm_vm.c 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma               641 drivers/gpu/drm/drm_vm.c 	drm_vm_open_locked(dev, vma);
vma               645 drivers/gpu/drm/drm_vm.c int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
vma               655 drivers/gpu/drm/drm_vm.c 	ret = drm_mmap_locked(filp, vma);
vma               665 drivers/gpu/drm/drm_vm.c 	struct drm_vma_entry *vma, *vma_temp;
vma               668 drivers/gpu/drm/drm_vm.c 	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
vma               669 drivers/gpu/drm/drm_vm.c 		list_del(&vma->head);
vma               670 drivers/gpu/drm/drm_vm.c 		kfree(vma);
vma               207 drivers/gpu/drm/drm_vram_mm_helper.c int drm_vram_mm_mmap(struct file *filp, struct vm_area_struct *vma,
vma               210 drivers/gpu/drm/drm_vram_mm_helper.c 	return ttm_bo_mmap(filp, vma, &vmm->bdev);
vma               287 drivers/gpu/drm/drm_vram_mm_helper.c 	struct file *filp, struct vm_area_struct *vma)
vma               295 drivers/gpu/drm/drm_vram_mm_helper.c 	return drm_vram_mm_mmap(filp, vma, dev->vram_mm);
vma                50 drivers/gpu/drm/etnaviv/etnaviv_drv.h int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
vma                57 drivers/gpu/drm/etnaviv/etnaviv_drv.h 			   struct vm_area_struct *vma);
vma               128 drivers/gpu/drm/etnaviv/etnaviv_gem.c 		struct vm_area_struct *vma)
vma               132 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	vma->vm_flags &= ~VM_PFNMAP;
vma               133 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	vma->vm_flags |= VM_MIXEDMAP;
vma               135 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma               138 drivers/gpu/drm/etnaviv/etnaviv_gem.c 		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
vma               140 drivers/gpu/drm/etnaviv/etnaviv_gem.c 		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
vma               147 drivers/gpu/drm/etnaviv/etnaviv_gem.c 		fput(vma->vm_file);
vma               149 drivers/gpu/drm/etnaviv/etnaviv_gem.c 		vma->vm_pgoff = 0;
vma               150 drivers/gpu/drm/etnaviv/etnaviv_gem.c 		vma->vm_file  = etnaviv_obj->base.filp;
vma               152 drivers/gpu/drm/etnaviv/etnaviv_gem.c 		vma->vm_page_prot = vm_page_prot;
vma               158 drivers/gpu/drm/etnaviv/etnaviv_gem.c int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
vma               163 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	ret = drm_gem_mmap(filp, vma);
vma               169 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	obj = to_etnaviv_bo(vma->vm_private_data);
vma               170 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	return obj->ops->mmap(obj, vma);
vma               175 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	struct vm_area_struct *vma = vmf->vma;
vma               176 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	struct drm_gem_object *obj = vma->vm_private_data;
vma               200 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
vma               207 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	return vmf_insert_page(vma, vmf->address, page);
vma               711 drivers/gpu/drm/etnaviv/etnaviv_gem.c 		struct vm_area_struct *vma)
vma                36 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c 			   struct vm_area_struct *vma)
vma                41 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c 	ret = drm_gem_mmap_obj(obj, obj->size, vma);
vma                45 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c 	return etnaviv_obj->ops->mmap(etnaviv_obj, vma);
vma                94 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c 		struct vm_area_struct *vma)
vma                96 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c 	return dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
vma                37 drivers/gpu/drm/exynos/exynos_drm_fbdev.c 			struct vm_area_struct *vma)
vma                45 drivers/gpu/drm/exynos/exynos_drm_fbdev.c 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma                47 drivers/gpu/drm/exynos/exynos_drm_fbdev.c 	vm_size = vma->vm_end - vma->vm_start;
vma                52 drivers/gpu/drm/exynos/exynos_drm_fbdev.c 	ret = dma_mmap_attrs(to_dma_dev(helper->dev), vma, exynos_gem->cookie,
vma               292 drivers/gpu/drm/exynos/exynos_drm_gem.c 				      struct vm_area_struct *vma)
vma               298 drivers/gpu/drm/exynos/exynos_drm_gem.c 	vma->vm_flags &= ~VM_PFNMAP;
vma               299 drivers/gpu/drm/exynos/exynos_drm_gem.c 	vma->vm_pgoff = 0;
vma               301 drivers/gpu/drm/exynos/exynos_drm_gem.c 	vm_size = vma->vm_end - vma->vm_start;
vma               307 drivers/gpu/drm/exynos/exynos_drm_gem.c 	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
vma               386 drivers/gpu/drm/exynos/exynos_drm_gem.c 	struct vm_area_struct *vma = vmf->vma;
vma               387 drivers/gpu/drm/exynos/exynos_drm_gem.c 	struct drm_gem_object *obj = vma->vm_private_data;
vma               392 drivers/gpu/drm/exynos/exynos_drm_gem.c 	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
vma               400 drivers/gpu/drm/exynos/exynos_drm_gem.c 	return vmf_insert_mixed(vma, vmf->address,
vma               405 drivers/gpu/drm/exynos/exynos_drm_gem.c 				   struct vm_area_struct *vma)
vma               415 drivers/gpu/drm/exynos/exynos_drm_gem.c 		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma               417 drivers/gpu/drm/exynos/exynos_drm_gem.c 		vma->vm_page_prot =
vma               418 drivers/gpu/drm/exynos/exynos_drm_gem.c 			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma               420 drivers/gpu/drm/exynos/exynos_drm_gem.c 		vma->vm_page_prot =
vma               421 drivers/gpu/drm/exynos/exynos_drm_gem.c 			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
vma               423 drivers/gpu/drm/exynos/exynos_drm_gem.c 	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
vma               430 drivers/gpu/drm/exynos/exynos_drm_gem.c 	drm_gem_vm_close(vma);
vma               435 drivers/gpu/drm/exynos/exynos_drm_gem.c int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
vma               441 drivers/gpu/drm/exynos/exynos_drm_gem.c 	ret = drm_gem_mmap(filp, vma);
vma               447 drivers/gpu/drm/exynos/exynos_drm_gem.c 	obj = vma->vm_private_data;
vma               450 drivers/gpu/drm/exynos/exynos_drm_gem.c 		return dma_buf_mmap(obj->dma_buf, vma, 0);
vma               452 drivers/gpu/drm/exynos/exynos_drm_gem.c 	return exynos_drm_gem_mmap_obj(obj, vma);
vma               537 drivers/gpu/drm/exynos/exynos_drm_gem.c 			      struct vm_area_struct *vma)
vma               541 drivers/gpu/drm/exynos/exynos_drm_gem.c 	ret = drm_gem_mmap_obj(obj, obj->size, vma);
vma               545 drivers/gpu/drm/exynos/exynos_drm_gem.c 	return exynos_drm_gem_mmap_obj(obj, vma);
vma               108 drivers/gpu/drm/exynos/exynos_drm_gem.h int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
vma               121 drivers/gpu/drm/exynos/exynos_drm_gem.h 			      struct vm_area_struct *vma);
vma               101 drivers/gpu/drm/gma500/framebuffer.c 	struct vm_area_struct *vma = vmf->vma;
vma               102 drivers/gpu/drm/gma500/framebuffer.c 	struct psb_framebuffer *psbfb = vma->vm_private_data;
vma               114 drivers/gpu/drm/gma500/framebuffer.c 	page_num = vma_pages(vma);
vma               117 drivers/gpu/drm/gma500/framebuffer.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               122 drivers/gpu/drm/gma500/framebuffer.c 		ret = vmf_insert_mixed(vma, address,
vma               132 drivers/gpu/drm/gma500/framebuffer.c static void psbfb_vm_open(struct vm_area_struct *vma)
vma               136 drivers/gpu/drm/gma500/framebuffer.c static void psbfb_vm_close(struct vm_area_struct *vma)
vma               146 drivers/gpu/drm/gma500/framebuffer.c static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma               151 drivers/gpu/drm/gma500/framebuffer.c 	if (vma->vm_pgoff != 0)
vma               153 drivers/gpu/drm/gma500/framebuffer.c 	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
vma               157 drivers/gpu/drm/gma500/framebuffer.c 		psbfb->addr_space = vma->vm_file->f_mapping;
vma               163 drivers/gpu/drm/gma500/framebuffer.c 	vma->vm_ops = &psbfb_vm_ops;
vma               164 drivers/gpu/drm/gma500/framebuffer.c 	vma->vm_private_data = (void *)psbfb;
vma               165 drivers/gpu/drm/gma500/framebuffer.c 	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma               128 drivers/gpu/drm/gma500/gem.c 	struct vm_area_struct *vma = vmf->vma;
vma               138 drivers/gpu/drm/gma500/gem.c 	obj = vma->vm_private_data;	/* GEM object */
vma               162 drivers/gpu/drm/gma500/gem.c 	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
vma               169 drivers/gpu/drm/gma500/gem.c 	ret = vmf_insert_pfn(vma, vmf->address, pfn);
vma                94 drivers/gpu/drm/i810/i810_dma.c static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
vma               107 drivers/gpu/drm/i810/i810_dma.c 	vma->vm_flags |= VM_DONTCOPY;
vma               111 drivers/gpu/drm/i810/i810_dma.c 	if (io_remap_pfn_range(vma, vma->vm_start,
vma               112 drivers/gpu/drm/i810/i810_dma.c 			       vma->vm_pgoff,
vma               113 drivers/gpu/drm/i810/i810_dma.c 			       vma->vm_end - vma->vm_start, vma->vm_page_prot))
vma                95 drivers/gpu/drm/i915/display/intel_atomic_plane.c 	intel_state->vma = NULL;
vma               113 drivers/gpu/drm/i915/display/intel_atomic_plane.c 	WARN_ON(to_intel_plane_state(state)->vma);
vma              2078 drivers/gpu/drm/i915/display/intel_display.c 	struct i915_vma *vma;
vma              2118 drivers/gpu/drm/i915/display/intel_display.c 	vma = i915_gem_object_pin_to_display_plane(obj,
vma              2120 drivers/gpu/drm/i915/display/intel_display.c 	if (IS_ERR(vma))
vma              2123 drivers/gpu/drm/i915/display/intel_display.c 	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
vma              2142 drivers/gpu/drm/i915/display/intel_display.c 		ret = i915_vma_pin_fence(vma);
vma              2144 drivers/gpu/drm/i915/display/intel_display.c 			i915_gem_object_unpin_from_display_plane(vma);
vma              2145 drivers/gpu/drm/i915/display/intel_display.c 			vma = ERR_PTR(ret);
vma              2149 drivers/gpu/drm/i915/display/intel_display.c 		if (ret == 0 && vma->fence)
vma              2153 drivers/gpu/drm/i915/display/intel_display.c 	i915_vma_get(vma);
vma              2159 drivers/gpu/drm/i915/display/intel_display.c 	return vma;
vma              2162 drivers/gpu/drm/i915/display/intel_display.c void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
vma              2164 drivers/gpu/drm/i915/display/intel_display.c 	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
vma              2166 drivers/gpu/drm/i915/display/intel_display.c 	i915_gem_object_lock(vma->obj);
vma              2168 drivers/gpu/drm/i915/display/intel_display.c 		i915_vma_unpin_fence(vma);
vma              2169 drivers/gpu/drm/i915/display/intel_display.c 	i915_gem_object_unpin_from_display_plane(vma);
vma              2170 drivers/gpu/drm/i915/display/intel_display.c 	i915_gem_object_unlock(vma->obj);
vma              2172 drivers/gpu/drm/i915/display/intel_display.c 	i915_vma_put(vma);
vma              3208 drivers/gpu/drm/i915/display/intel_display.c 		if (!state->vma)
vma              3237 drivers/gpu/drm/i915/display/intel_display.c 	intel_state->vma =
vma              3243 drivers/gpu/drm/i915/display/intel_display.c 	if (IS_ERR(intel_state->vma)) {
vma              3245 drivers/gpu/drm/i915/display/intel_display.c 			  intel_crtc->pipe, PTR_ERR(intel_state->vma));
vma              3247 drivers/gpu/drm/i915/display/intel_display.c 		intel_state->vma = NULL;
vma              14307 drivers/gpu/drm/i915/display/intel_display.c 	struct i915_vma *vma;
vma              14320 drivers/gpu/drm/i915/display/intel_display.c 	vma = intel_pin_and_fence_fb_obj(fb,
vma              14324 drivers/gpu/drm/i915/display/intel_display.c 	if (IS_ERR(vma))
vma              14325 drivers/gpu/drm/i915/display/intel_display.c 		return PTR_ERR(vma);
vma              14327 drivers/gpu/drm/i915/display/intel_display.c 	plane_state->vma = vma;
vma              14334 drivers/gpu/drm/i915/display/intel_display.c 	struct i915_vma *vma;
vma              14336 drivers/gpu/drm/i915/display/intel_display.c 	vma = fetch_and_zero(&old_plane_state->vma);
vma              14337 drivers/gpu/drm/i915/display/intel_display.c 	if (vma)
vma              14338 drivers/gpu/drm/i915/display/intel_display.c 		intel_unpin_fb_vma(vma, old_plane_state->flags);
vma               478 drivers/gpu/drm/i915/display/intel_display.h void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
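[note] Note that from here on most hits are i915's own struct i915_vma (a GPU binding of an object into an address space), not the core struct vm_area_struct. The cleanup path above transfers ownership of the pinned vma out of the plane state with i915's fetch_and_zero() helper (i915_utils.h) before unpinning, so a repeated cleanup sees NULL and does nothing. Sketch of the idiom:

	static void example_cleanup(struct intel_plane_state *old_state,
				    unsigned long flags)
	{
		struct i915_vma *vma;

		vma = fetch_and_zero(&old_state->vma);	/* take it, leave NULL */
		if (vma)
			intel_unpin_fb_vma(vma, flags);	/* single release point */
	}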
vma               104 drivers/gpu/drm/i915/display/intel_display_types.h 	struct i915_vma *vma;
vma               513 drivers/gpu/drm/i915/display/intel_display_types.h 	struct i915_vma *vma;
vma              1523 drivers/gpu/drm/i915/display/intel_display_types.h 	return i915_ggtt_offset(state->vma);
vma               159 drivers/gpu/drm/i915/display/intel_fbc.c 	fbc_ctl |= params->vma->fence->id;
vma               180 drivers/gpu/drm/i915/display/intel_fbc.c 		dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
vma               240 drivers/gpu/drm/i915/display/intel_fbc.c 			dpfc_ctl |= params->vma->fence->id;
vma               244 drivers/gpu/drm/i915/display/intel_fbc.c 				   params->vma->fence->id);
vma               257 drivers/gpu/drm/i915/display/intel_fbc.c 		   i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
vma               293 drivers/gpu/drm/i915/display/intel_fbc.c 		if (i915_gem_object_get_tiling(params->vma->obj) !=
vma               324 drivers/gpu/drm/i915/display/intel_fbc.c 			   params->vma->fence->id);
vma               666 drivers/gpu/drm/i915/display/intel_fbc.c 	cache->vma = NULL;
vma               694 drivers/gpu/drm/i915/display/intel_fbc.c 	cache->vma = plane_state->vma;
vma               696 drivers/gpu/drm/i915/display/intel_fbc.c 	if (WARN_ON(cache->flags & PLANE_HAS_FENCE && !cache->vma->fence))
vma               714 drivers/gpu/drm/i915/display/intel_fbc.c 	if (!cache->vma) {
vma               839 drivers/gpu/drm/i915/display/intel_fbc.c 	params->vma = cache->vma;
vma               178 drivers/gpu/drm/i915/display/intel_fbdev.c 	struct i915_vma *vma;
vma               214 drivers/gpu/drm/i915/display/intel_fbdev.c 	vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base,
vma               216 drivers/gpu/drm/i915/display/intel_fbdev.c 	if (IS_ERR(vma)) {
vma               217 drivers/gpu/drm/i915/display/intel_fbdev.c 		ret = PTR_ERR(vma);
vma               240 drivers/gpu/drm/i915/display/intel_fbdev.c 		(unsigned long)(ggtt->gmadr.start + vma->node.start);
vma               241 drivers/gpu/drm/i915/display/intel_fbdev.c 	info->fix.smem_len = vma->node.size;
vma               243 drivers/gpu/drm/i915/display/intel_fbdev.c 	vaddr = i915_vma_pin_iomap(vma);
vma               250 drivers/gpu/drm/i915/display/intel_fbdev.c 	info->screen_size = vma->node.size;
vma               258 drivers/gpu/drm/i915/display/intel_fbdev.c 	if (vma->obj->stolen && !prealloc)
vma               265 drivers/gpu/drm/i915/display/intel_fbdev.c 		      i915_ggtt_offset(vma));
vma               266 drivers/gpu/drm/i915/display/intel_fbdev.c 	ifbdev->vma = vma;
vma               275 drivers/gpu/drm/i915/display/intel_fbdev.c 	intel_unpin_fb_vma(vma, flags);
vma               295 drivers/gpu/drm/i915/display/intel_fbdev.c 	if (ifbdev->vma) {
vma               297 drivers/gpu/drm/i915/display/intel_fbdev.c 		intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags);
vma               555 drivers/gpu/drm/i915/display/intel_fbdev.c 	if (!ifbdev || !ifbdev->vma)
vma               617 drivers/gpu/drm/i915/display/intel_fbdev.c 	if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
vma               629 drivers/gpu/drm/i915/display/intel_fbdev.c 	if (!ifbdev->vma)
vma               180 drivers/gpu/drm/i915/display/intel_overlay.c 	struct i915_vma *vma;
vma               278 drivers/gpu/drm/i915/display/intel_overlay.c 				       struct i915_vma *vma)
vma               284 drivers/gpu/drm/i915/display/intel_overlay.c 	intel_frontbuffer_track(overlay->vma ? overlay->vma->obj->frontbuffer : NULL,
vma               285 drivers/gpu/drm/i915/display/intel_overlay.c 				vma ? vma->obj->frontbuffer : NULL,
vma               291 drivers/gpu/drm/i915/display/intel_overlay.c 	overlay->old_vma = overlay->vma;
vma               292 drivers/gpu/drm/i915/display/intel_overlay.c 	if (vma)
vma               293 drivers/gpu/drm/i915/display/intel_overlay.c 		overlay->vma = i915_vma_get(vma);
vma               295 drivers/gpu/drm/i915/display/intel_overlay.c 		overlay->vma = NULL;
vma               300 drivers/gpu/drm/i915/display/intel_overlay.c 				  struct i915_vma *vma,
vma               332 drivers/gpu/drm/i915/display/intel_overlay.c 	intel_overlay_flip_prepare(overlay, vma);
vma               340 drivers/gpu/drm/i915/display/intel_overlay.c 	struct i915_vma *vma;
vma               342 drivers/gpu/drm/i915/display/intel_overlay.c 	vma = fetch_and_zero(&overlay->old_vma);
vma               343 drivers/gpu/drm/i915/display/intel_overlay.c 	if (WARN_ON(!vma))
vma               349 drivers/gpu/drm/i915/display/intel_overlay.c 	i915_gem_object_unpin_from_display_plane(vma);
vma               350 drivers/gpu/drm/i915/display/intel_overlay.c 	i915_vma_put(vma);
vma               751 drivers/gpu/drm/i915/display/intel_overlay.c 	struct i915_vma *vma;
vma               764 drivers/gpu/drm/i915/display/intel_overlay.c 	vma = i915_gem_object_pin_to_display_plane(new_bo,
vma               767 drivers/gpu/drm/i915/display/intel_overlay.c 	if (IS_ERR(vma)) {
vma               768 drivers/gpu/drm/i915/display/intel_overlay.c 		ret = PTR_ERR(vma);
vma               800 drivers/gpu/drm/i915/display/intel_overlay.c 	iowrite32(i915_ggtt_offset(vma) + params->offset_Y, &regs->OBUF_0Y);
vma               817 drivers/gpu/drm/i915/display/intel_overlay.c 		iowrite32(i915_ggtt_offset(vma) + params->offset_U,
vma               819 drivers/gpu/drm/i915/display/intel_overlay.c 		iowrite32(i915_ggtt_offset(vma) + params->offset_V,
vma               836 drivers/gpu/drm/i915/display/intel_overlay.c 	ret = intel_overlay_continue(overlay, vma, scale_changed);
vma               843 drivers/gpu/drm/i915/display/intel_overlay.c 	i915_gem_object_unpin_from_display_plane(vma);
vma              1303 drivers/gpu/drm/i915/display/intel_overlay.c 	struct i915_vma *vma;
vma              1316 drivers/gpu/drm/i915/display/intel_overlay.c 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
vma              1317 drivers/gpu/drm/i915/display/intel_overlay.c 	if (IS_ERR(vma)) {
vma              1318 drivers/gpu/drm/i915/display/intel_overlay.c 		err = PTR_ERR(vma);
vma              1325 drivers/gpu/drm/i915/display/intel_overlay.c 		overlay->flip_addr = i915_ggtt_offset(vma);
vma              1326 drivers/gpu/drm/i915/display/intel_overlay.c 	overlay->regs = i915_vma_pin_iomap(vma);
vma              1327 drivers/gpu/drm/i915/display/intel_overlay.c 	i915_vma_unpin(vma);
vma                14 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	struct i915_vma *vma;
vma                20 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c static int vma_set_pages(struct i915_vma *vma)
vma                22 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	struct i915_sleeve *sleeve = vma->private;
vma                24 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	vma->pages = sleeve->pages;
vma                25 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	vma->page_sizes = sleeve->page_sizes;
vma                30 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c static void vma_clear_pages(struct i915_vma *vma)
vma                32 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	GEM_BUG_ON(!vma->pages);
vma                33 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	vma->pages = NULL;
vma                36 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c static int vma_bind(struct i915_vma *vma,
vma                40 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	return vma->vm->vma_ops.bind_vma(vma, cache_level, flags);
vma                43 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c static void vma_unbind(struct i915_vma *vma)
vma                45 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	vma->vm->vma_ops.unbind_vma(vma);
vma                61 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	struct i915_vma *vma;
vma                68 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	vma = i915_vma_instance(obj, vm, NULL);
vma                69 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	if (IS_ERR(vma)) {
vma                70 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 		err = PTR_ERR(vma);
vma                74 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	vma->private = sleeve;
vma                75 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	vma->ops = &proxy_vma_ops;
vma                77 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	sleeve->vma = vma;
vma               159 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
vma               160 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	struct i915_vma *vma = w->sleeve->vma;
vma               178 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
vma               182 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
vma               214 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	err = i915_active_ref(&vma->active, rq->timeline, rq);
vma               231 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	i915_vma_unpin(vma);
vma               106 drivers/gpu/drm/i915/gem/i915_gem_context.c 		struct i915_vma *vma = rcu_dereference_raw(*slot);
vma               107 drivers/gpu/drm/i915/gem/i915_gem_context.c 		struct drm_i915_gem_object *obj = vma->obj;
vma               131 drivers/gpu/drm/i915/gem/i915_gem_context.c 			if (atomic_dec_and_test(&vma->open_count) &&
vma               132 drivers/gpu/drm/i915/gem/i915_gem_context.c 			    !i915_vma_is_ggtt(vma))
vma               133 drivers/gpu/drm/i915/gem/i915_gem_context.c 				i915_vma_close(vma);
vma               130 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
vma               135 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 	if (obj->base.size < vma->vm_end - vma->vm_start)
vma               141 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 	ret = call_mmap(obj->base.filp, vma);
vma               145 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 	fput(vma->vm_file);
vma               146 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 	vma->vm_file = get_file(obj->base.filp);
vma               178 drivers/gpu/drm/i915/gem/i915_gem_domain.c 	struct i915_vma *vma;
vma               192 drivers/gpu/drm/i915/gem/i915_gem_domain.c 	list_for_each_entry(vma, &obj->vma.list, obj_link) {
vma               193 drivers/gpu/drm/i915/gem/i915_gem_domain.c 		if (!drm_mm_node_allocated(&vma->node))
vma               196 drivers/gpu/drm/i915/gem/i915_gem_domain.c 		if (i915_vma_is_pinned(vma)) {
vma               201 drivers/gpu/drm/i915/gem/i915_gem_domain.c 		if (!i915_vma_is_closed(vma) &&
vma               202 drivers/gpu/drm/i915/gem/i915_gem_domain.c 		    i915_gem_valid_gtt_space(vma, cache_level))
vma               205 drivers/gpu/drm/i915/gem/i915_gem_domain.c 		ret = i915_vma_unbind(vma);
vma               267 drivers/gpu/drm/i915/gem/i915_gem_domain.c 			for_each_ggtt_vma(vma, obj) {
vma               268 drivers/gpu/drm/i915/gem/i915_gem_domain.c 				ret = i915_vma_revoke_fence(vma);
vma               287 drivers/gpu/drm/i915/gem/i915_gem_domain.c 		list_for_each_entry(vma, &obj->vma.list, obj_link) {
vma               288 drivers/gpu/drm/i915/gem/i915_gem_domain.c 			if (!drm_mm_node_allocated(&vma->node))
vma               291 drivers/gpu/drm/i915/gem/i915_gem_domain.c 			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
vma               297 drivers/gpu/drm/i915/gem/i915_gem_domain.c 	list_for_each_entry(vma, &obj->vma.list, obj_link)
vma               298 drivers/gpu/drm/i915/gem/i915_gem_domain.c 		vma->node.color = cache_level;
vma               420 drivers/gpu/drm/i915/gem/i915_gem_domain.c 	struct i915_vma *vma;
vma               443 drivers/gpu/drm/i915/gem/i915_gem_domain.c 		vma = ERR_PTR(ret);
vma               454 drivers/gpu/drm/i915/gem/i915_gem_domain.c 	vma = ERR_PTR(-ENOSPC);
vma               457 drivers/gpu/drm/i915/gem/i915_gem_domain.c 		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
vma               461 drivers/gpu/drm/i915/gem/i915_gem_domain.c 	if (IS_ERR(vma))
vma               462 drivers/gpu/drm/i915/gem/i915_gem_domain.c 		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
vma               463 drivers/gpu/drm/i915/gem/i915_gem_domain.c 	if (IS_ERR(vma))
vma               466 drivers/gpu/drm/i915/gem/i915_gem_domain.c 	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
vma               475 drivers/gpu/drm/i915/gem/i915_gem_domain.c 	return vma;
vma               479 drivers/gpu/drm/i915/gem/i915_gem_domain.c 	return vma;
vma               485 drivers/gpu/drm/i915/gem/i915_gem_domain.c 	struct i915_vma *vma;
vma               490 drivers/gpu/drm/i915/gem/i915_gem_domain.c 	for_each_ggtt_vma(vma, obj) {
vma               491 drivers/gpu/drm/i915/gem/i915_gem_domain.c 		if (!drm_mm_node_allocated(&vma->node))
vma               494 drivers/gpu/drm/i915/gem/i915_gem_domain.c 		list_move_tail(&vma->vm_link, &vma->vm->bound_list);
vma               511 drivers/gpu/drm/i915/gem/i915_gem_domain.c i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
vma               513 drivers/gpu/drm/i915/gem/i915_gem_domain.c 	struct drm_i915_gem_object *obj = vma->obj;
vma               521 drivers/gpu/drm/i915/gem/i915_gem_domain.c 		vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
vma               526 drivers/gpu/drm/i915/gem/i915_gem_domain.c 	i915_vma_unpin(vma);
vma               221 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	struct i915_vma **vma;
vma               352 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		 const struct i915_vma *vma,
vma               355 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	if (vma->node.size < entry->pad_to_size)
vma               358 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
vma               362 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	    vma->node.start != entry->offset)
vma               366 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	    vma->node.start < BATCH_OFFSET_BIAS)
vma               370 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	    (vma->node.start + vma->node.size - 1) >> 32)
vma               374 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	    !i915_vma_is_map_and_fenceable(vma))
vma               383 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	   struct i915_vma *vma)
vma               385 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	unsigned int exec_flags = *vma->exec_flags;
vma               388 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	if (vma->node.size)
vma               389 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		pin_flags = vma->node.start;
vma               397 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
vma               401 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		if (unlikely(i915_vma_pin_fence(vma))) {
vma               402 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			i915_vma_unpin(vma);
vma               406 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		if (vma->fence)
vma               410 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
vma               411 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	return !eb_vma_misplaced(entry, vma, exec_flags);
vma               414 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
vma               419 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		__i915_vma_unpin_fence(vma);
vma               421 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	__i915_vma_unpin(vma);
vma               425 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags)
vma               430 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	__eb_unreserve_vma(vma, *flags);
vma               437 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		struct i915_vma *vma)
vma               462 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	if (unlikely(vma->exec_flags)) {
vma               480 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		    i915_gem_object_is_tiled(vma->obj))
vma               493 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	   struct i915_vma *vma)
vma               498 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	GEM_BUG_ON(i915_vma_is_closed(vma));
vma               501 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		err = eb_validate_vma(eb, entry, vma);
vma               507 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		vma->exec_handle = entry->handle;
vma               508 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		hlist_add_head(&vma->exec_node,
vma               514 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		list_add_tail(&vma->reloc_link, &eb->relocs);
vma               522 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	eb->vma[i] = vma;
vma               524 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	vma->exec_flags = &eb->flags[i];
vma               542 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		eb->batch = vma;
vma               546 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	if (eb_pin_vma(eb, entry, vma)) {
vma               547 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		if (entry->offset != vma->node.start) {
vma               548 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			entry->offset = vma->node.start | UPDATE;
vma               552 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		eb_unreserve_vma(vma, vma->exec_flags);
vma               554 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		list_add_tail(&vma->exec_link, &eb->unbound);
vma               555 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		if (drm_mm_node_allocated(&vma->node))
vma               556 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			err = i915_vma_unbind(vma);
vma               558 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			vma->exec_flags = NULL;
vma               581 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			  struct i915_vma *vma)
vma               583 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
vma               584 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	unsigned int exec_flags = *vma->exec_flags;
vma               609 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	err = i915_vma_pin(vma,
vma               615 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	if (entry->offset != vma->node.start) {
vma               616 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		entry->offset = vma->node.start | UPDATE;
vma               621 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		err = i915_vma_pin_fence(vma);
vma               623 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			i915_vma_unpin(vma);
vma               627 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		if (vma->fence)
vma               631 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
vma               632 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));
vma               641 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	struct i915_vma *vma;
vma               662 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		list_for_each_entry(vma, &eb->unbound, exec_link) {
vma               663 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			err = eb_reserve_vma(eb, vma);
vma               675 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			struct i915_vma *vma = eb->vma[i];
vma               681 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			eb_unreserve_vma(vma, &eb->flags[i]);
vma               685 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 				list_add(&vma->exec_link, &eb->unbound);
vma               688 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 				list_add_tail(&vma->exec_link, &eb->unbound);
vma               691 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 				list_add(&vma->exec_link, &last);
vma               693 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 				list_add_tail(&vma->exec_link, &last);
vma               765 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		struct i915_vma *vma;
vma               767 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		vma = radix_tree_lookup(handles_vma, handle);
vma               768 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		if (likely(vma))
vma               777 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		vma = i915_vma_instance(obj, eb->context->vm, NULL);
vma               778 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		if (IS_ERR(vma)) {
vma               779 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			err = PTR_ERR(vma);
vma               789 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		err = radix_tree_insert(handles_vma, handle, vma);
vma               796 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		if (!atomic_fetch_inc(&vma->open_count))
vma               797 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			i915_vma_reopen(vma);
vma               806 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		err = eb_add_vma(eb, i, batch, vma);
vma               810 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		GEM_BUG_ON(vma != eb->vma[i]);
vma               811 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
vma               812 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
vma               813 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			   eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
vma               824 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	eb->vma[i] = NULL;
vma               836 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		return eb->vma[handle];
vma               839 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		struct i915_vma *vma;
vma               842 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		hlist_for_each_entry(vma, head, exec_node) {
vma               843 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			if (vma->exec_handle == handle)
vma               844 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 				return vma;
vma               856 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		struct i915_vma *vma = eb->vma[i];
vma               859 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		if (!vma)
vma               862 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
vma               863 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		vma->exec_flags = NULL;
vma               864 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		eb->vma[i] = NULL;
vma               867 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			__eb_unreserve_vma(vma, flags);
vma               870 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			i915_vma_put(vma);
vma              1029 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		struct i915_vma *vma;
vma              1044 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
vma              1048 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		if (IS_ERR(vma)) {
vma              1058 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			cache->node.start = vma->node.start;
vma              1059 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			cache->node.mm = (void *)vma;
vma              1122 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
vma              1124 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	struct drm_i915_gem_object *obj = vma->obj;
vma              1127 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	i915_vma_lock(vma);
vma              1133 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	err = i915_request_await_object(rq, vma->obj, true);
vma              1135 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
vma              1137 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	i915_vma_unlock(vma);
vma              1143 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			     struct i915_vma *vma,
vma              1166 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	batch = i915_vma_instance(pool->obj, vma->vm, NULL);
vma              1186 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	err = reloc_move_to_gpu(rq, vma);
vma              1228 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		      struct i915_vma *vma,
vma              1247 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		err = __reloc_gpu_alloc(eb, vma, len);
vma              1259 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c relocate_entry(struct i915_vma *vma,
vma              1271 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	     !dma_resv_test_signaled_rcu(vma->resv, true))) {
vma              1284 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		batch = reloc_gpu(eb, vma, len);
vma              1288 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		addr = gen8_canonical_addr(vma->node.start + offset);
vma              1329 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
vma              1350 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		  struct i915_vma *vma,
vma              1413 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		     vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
vma              1418 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			  (int)vma->size);
vma              1437 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	*vma->exec_flags &= ~EXEC_OBJECT_ASYNC;
vma              1440 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	return relocate_entry(vma, reloc, eb, target);
vma              1443 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
vma              1448 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
vma              1488 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			u64 offset = eb_relocate_entry(eb, vma, r);
vma              1531 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
vma              1533 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
vma              1540 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);
vma              1684 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	struct i915_vma *vma;
vma              1740 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	list_for_each_entry(vma, &eb->relocs, reloc_link) {
vma              1743 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			err = eb_relocate_vma(eb, vma);
vma              1748 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			err = eb_relocate_vma_slow(eb, vma);
vma              1793 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		struct i915_vma *vma;
vma              1795 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		list_for_each_entry(vma, &eb->relocs, reloc_link) {
vma              1796 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			if (eb_relocate_vma(eb, vma))
vma              1817 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		struct i915_vma *vma = eb->vma[i];
vma              1819 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
vma              1830 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 				ww_mutex_unlock(&eb->vma[j]->resv->lock);
vma              1833 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 				swap(eb->vma[i],  eb->vma[j]);
vma              1834 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 				eb->vma[i]->exec_flags = &eb->flags[i];
vma              1836 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			GEM_BUG_ON(vma != eb->vma[0]);
vma              1837 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			vma->exec_flags = &eb->flags[0];
vma              1839 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
vma              1849 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		struct i915_vma *vma = eb->vma[i];
vma              1850 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		struct drm_i915_gem_object *obj = vma->obj;
vma              1852 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		assert_vma_held(vma);
vma              1860 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 				capture->vma = vma;
vma              1888 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			err = i915_vma_move_to_active(vma, eb->request, flags);
vma              1890 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		i915_vma_unlock(vma);
vma              1892 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		__eb_unreserve_vma(vma, flags);
vma              1893 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		vma->exec_flags = NULL;
vma              1896 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			i915_vma_put(vma);
vma              1967 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	struct i915_vma * const vma = *eb->vma;
vma              1978 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	} else if (vma->vm->has_read_only) {
vma              1980 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		vm = vma->vm;
vma              1993 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	struct i915_vma *vma;
vma              2002 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	vma = shadow_batch_pin(eb, pool->obj);
vma              2003 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	if (IS_ERR(vma))
vma              2009 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	shadow_batch_start = gen8_canonical_addr(vma->node.start);
vma              2021 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		i915_vma_unpin(vma);
vma              2031 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			vma = NULL;
vma              2033 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			vma = ERR_PTR(err);
vma              2037 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	eb->vma[eb->buffer_count] = i915_vma_get(vma);
vma              2040 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	vma->exec_flags = &eb->flags[eb->buffer_count];
vma              2044 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	eb->batch = vma;
vma              2051 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	vma->private = pool;
vma              2052 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	return vma;
vma              2056 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	return vma;
vma              2502 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1);
vma              2503 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	eb.vma[0] = NULL;
vma              2504 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);
vma              2604 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		struct i915_vma *vma;
vma              2606 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		vma = eb_parse(&eb);
vma              2607 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		if (IS_ERR(vma)) {
vma              2608 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			err = PTR_ERR(vma);
vma              2618 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		struct i915_vma *vma;
vma              2630 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
vma              2631 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		if (IS_ERR(vma)) {
vma              2632 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			err = PTR_ERR(vma);
vma              2636 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		eb.batch = vma;
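
The execbuffer lines above show eb_vma_misplaced() testing a bound vma against its exec entry. A sketch of those placement checks follows; the index elides the left-hand side of several conditions, so the flag names (uapi EXEC_OBJECT_PINNED and EXEC_OBJECT_SUPPORTS_48B_ADDRESS, internal __EXEC_OBJECT_NEEDS_BIAS and __EXEC_OBJECT_NEEDS_MAP) are reconstructions, and the helper name is hypothetical:

static bool sketch_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
				 const struct i915_vma *vma,
				 unsigned int flags)
{
	if (vma->node.size < entry->pad_to_size)
		return true;	/* user asked for more padding */

	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
		return true;	/* requested alignment not met */

	if (flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;	/* softpinned at the wrong offset */

	if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;	/* batch must sit above the bias */

	if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;	/* object must stay below 4 GiB */

	if (flags & __EXEC_OBJECT_NEEDS_MAP &&
	    !i915_vma_is_map_and_fenceable(vma))
		return true;	/* needs a mappable, fenceable binding */

	return false;
}
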
vma                20 drivers/gpu/drm/i915/gem/i915_gem_mman.c __vma_matches(struct vm_area_struct *vma, struct file *filp,
vma                23 drivers/gpu/drm/i915/gem/i915_gem_mman.c 	if (vma->vm_file != filp)
vma                26 drivers/gpu/drm/i915/gem/i915_gem_mman.c 	return vma->vm_start == addr &&
vma                27 drivers/gpu/drm/i915/gem/i915_gem_mman.c 	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
vma                89 drivers/gpu/drm/i915/gem/i915_gem_mman.c 		struct vm_area_struct *vma;
vma                95 drivers/gpu/drm/i915/gem/i915_gem_mman.c 		vma = find_vma(mm, addr);
vma                96 drivers/gpu/drm/i915/gem/i915_gem_mman.c 		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
vma                97 drivers/gpu/drm/i915/gem/i915_gem_mman.c 			vma->vm_page_prot =
vma                98 drivers/gpu/drm/i915/gem/i915_gem_mman.c 				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma               220 drivers/gpu/drm/i915/gem/i915_gem_mman.c 	struct vm_area_struct *area = vmf->vma;
vma               228 drivers/gpu/drm/i915/gem/i915_gem_mman.c 	struct i915_vma *vma;
vma               263 drivers/gpu/drm/i915/gem/i915_gem_mman.c 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
vma               267 drivers/gpu/drm/i915/gem/i915_gem_mman.c 	if (IS_ERR(vma)) {
vma               282 drivers/gpu/drm/i915/gem/i915_gem_mman.c 		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
vma               283 drivers/gpu/drm/i915/gem/i915_gem_mman.c 		if (IS_ERR(vma)) {
vma               286 drivers/gpu/drm/i915/gem/i915_gem_mman.c 			vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
vma               289 drivers/gpu/drm/i915/gem/i915_gem_mman.c 	if (IS_ERR(vma)) {
vma               290 drivers/gpu/drm/i915/gem/i915_gem_mman.c 		ret = PTR_ERR(vma);
vma               294 drivers/gpu/drm/i915/gem/i915_gem_mman.c 	ret = i915_vma_pin_fence(vma);
vma               300 drivers/gpu/drm/i915/gem/i915_gem_mman.c 			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
vma               301 drivers/gpu/drm/i915/gem/i915_gem_mman.c 			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
vma               302 drivers/gpu/drm/i915/gem/i915_gem_mman.c 			       min_t(u64, vma->size, area->vm_end - area->vm_start),
vma               311 drivers/gpu/drm/i915/gem/i915_gem_mman.c 	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
vma               321 drivers/gpu/drm/i915/gem/i915_gem_mman.c 		i915_vma_set_ggtt_write(vma);
vma               326 drivers/gpu/drm/i915/gem/i915_gem_mman.c 	i915_vma_unpin_fence(vma);
vma               328 drivers/gpu/drm/i915/gem/i915_gem_mman.c 	__i915_vma_unpin(vma);
vma               377 drivers/gpu/drm/i915/gem/i915_gem_mman.c 	struct i915_vma *vma;
vma               386 drivers/gpu/drm/i915/gem/i915_gem_mman.c 	for_each_ggtt_vma(vma, obj)
vma               387 drivers/gpu/drm/i915/gem/i915_gem_mman.c 		i915_vma_unset_userfault(vma);
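
Condensed from the i915_gem_mman.c lines above: after a successful mmap of a GEM object, the driver looks the freshly created VMA back up and, if it matches the file and size, retags it write-combining. mmap_sem acquisition and error paths are elided:

static bool __vma_matches(struct vm_area_struct *vma, struct file *filp,
			  unsigned long addr, unsigned long size)
{
	if (vma->vm_file != filp)
		return false;

	return vma->vm_start == addr &&
	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}

	/* caller context (mm, addr, args come from the surrounding ioctl): */
	vma = find_vma(mm, addr);
	if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
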
vma                54 drivers/gpu/drm/i915/gem/i915_gem_object.c 	spin_lock_init(&obj->vma.lock);
vma                55 drivers/gpu/drm/i915/gem/i915_gem_object.c 	INIT_LIST_HEAD(&obj->vma.list);
vma               113 drivers/gpu/drm/i915/gem/i915_gem_object.c 		struct i915_vma *vma;
vma               121 drivers/gpu/drm/i915/gem/i915_gem_object.c 		vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
vma               122 drivers/gpu/drm/i915/gem/i915_gem_object.c 		if (vma) {
vma               123 drivers/gpu/drm/i915/gem/i915_gem_object.c 			GEM_BUG_ON(vma->obj != obj);
vma               124 drivers/gpu/drm/i915/gem/i915_gem_object.c 			GEM_BUG_ON(!atomic_read(&vma->open_count));
vma               125 drivers/gpu/drm/i915/gem/i915_gem_object.c 			if (atomic_dec_and_test(&vma->open_count) &&
vma               126 drivers/gpu/drm/i915/gem/i915_gem_object.c 			    !i915_vma_is_ggtt(vma))
vma               127 drivers/gpu/drm/i915/gem/i915_gem_object.c 				i915_vma_close(vma);
vma               158 drivers/gpu/drm/i915/gem/i915_gem_object.c 		struct i915_vma *vma, *vn;
vma               164 drivers/gpu/drm/i915/gem/i915_gem_object.c 		list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
vma               165 drivers/gpu/drm/i915/gem/i915_gem_object.c 			GEM_BUG_ON(i915_vma_is_active(vma));
vma               166 drivers/gpu/drm/i915/gem/i915_gem_object.c 			vma->flags &= ~I915_VMA_PIN_MASK;
vma               167 drivers/gpu/drm/i915/gem/i915_gem_object.c 			i915_vma_destroy(vma);
vma               169 drivers/gpu/drm/i915/gem/i915_gem_object.c 		GEM_BUG_ON(!list_empty(&obj->vma.list));
vma               170 drivers/gpu/drm/i915/gem/i915_gem_object.c 		GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
vma               261 drivers/gpu/drm/i915/gem/i915_gem_object.c 	struct i915_vma *vma;
vma               270 drivers/gpu/drm/i915/gem/i915_gem_object.c 		for_each_ggtt_vma(vma, obj)
vma               271 drivers/gpu/drm/i915/gem/i915_gem_object.c 			intel_gt_flush_ggtt_writes(vma->vm->gt);
vma               275 drivers/gpu/drm/i915/gem/i915_gem_object.c 		for_each_ggtt_vma(vma, obj) {
vma               276 drivers/gpu/drm/i915/gem/i915_gem_object.c 			if (vma->iomap)
vma               279 drivers/gpu/drm/i915/gem/i915_gem_object.c 			i915_vma_unset_ggtt_write(vma);
vma               401 drivers/gpu/drm/i915/gem/i915_gem_object.h void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
vma                15 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 					 struct i915_vma *vma,
vma                32 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	count = div_u64(vma->size, block_size);
vma                47 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	rem = vma->size;
vma                48 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	offset = vma->node.start;
vma               104 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
vma               108 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	i915_vma_lock(vma);
vma               109 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	err = i915_request_await_object(rq, vma->obj, false);
vma               111 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 		err = i915_vma_move_to_active(vma, rq, 0);
vma               112 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	i915_vma_unlock(vma);
vma               116 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	return intel_engine_pool_mark_active(vma->private, rq);
vma               119 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma)
vma               121 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	i915_vma_unpin(vma);
vma               122 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	intel_engine_pool_put(vma->private);
vma               132 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	struct i915_vma *vma;
vma               135 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	vma = i915_vma_instance(obj, ce->vm, NULL);
vma               136 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	if (IS_ERR(vma))
vma               137 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 		return PTR_ERR(vma);
vma               139 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
vma               149 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	batch = intel_emit_vma_fill_blt(ce, vma, value);
vma               175 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	i915_vma_lock(vma);
vma               176 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	err = i915_request_await_object(rq, vma->obj, true);
vma               178 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
vma               179 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	i915_vma_unlock(vma);
vma               194 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	i915_vma_unpin(vma);
vma               303 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c static int move_to_gpu(struct i915_vma *vma, struct i915_request *rq, bool write)
vma               305 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	struct drm_i915_gem_object *obj = vma->obj;
vma               319 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	struct i915_vma *vma[2], *batch;
vma               324 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	vma[0] = i915_vma_instance(src, vm, NULL);
vma               325 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	if (IS_ERR(vma[0]))
vma               326 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 		return PTR_ERR(vma[0]);
vma               328 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	err = i915_vma_pin(vma[0], 0, 0, PIN_USER);
vma               332 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	vma[1] = i915_vma_instance(dst, vm, NULL);
vma               333 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	if (IS_ERR(vma[1]))
vma               336 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	err = i915_vma_pin(vma[1], 0, 0, PIN_USER);
vma               340 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	batch = intel_emit_vma_copy_blt(ce, vma[0], vma[1]);
vma               360 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	for (i = 0; i < ARRAY_SIZE(vma); i++) {
vma               361 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 		err = move_to_gpu(vma[i], rq, i);
vma               366 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	for (i = 0; i < ARRAY_SIZE(vma); i++) {
vma               369 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 		err = i915_vma_move_to_active(vma[i], rq, flags);
vma               393 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	i915_vma_unpin(vma[1]);
vma               395 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	i915_vma_unpin(vma[0]);
vma                19 drivers/gpu/drm/i915/gem/i915_gem_object_blt.h 					 struct i915_vma *vma,
vma                26 drivers/gpu/drm/i915/gem/i915_gem_object_blt.h int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq);
vma                27 drivers/gpu/drm/i915/gem/i915_gem_object_blt.h void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma);
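
A pattern worth calling out: intel_emit_vma_mark_active() above, reloc_move_to_gpu() in the execbuffer excerpts, and most of the selftests below all serialise a vma against a request the same way — lock the vma, order the request after existing fences on the backing object, then mark the vma active. A sketch of that idiom (the helper name is hypothetical):

static int mark_vma_active(struct i915_request *rq, struct i915_vma *vma,
			   bool write)
{
	int err;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, write);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq,
					      write ? EXEC_OBJECT_WRITE : 0);
	i915_vma_unlock(vma);

	return err;
}
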
vma                95 drivers/gpu/drm/i915/gem/i915_gem_object_types.h 	} vma;
vma               426 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	struct i915_vma *vma, *next;
vma               442 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 	list_for_each_entry_safe(vma, next,
vma               444 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 		unsigned long count = vma->node.size >> PAGE_SHIFT;
vma               446 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 		if (!vma->iomap || i915_vma_is_active(vma))
vma               450 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 		if (i915_vma_unbind(vma) == 0)
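
The shrinker lines above walk the GGTT bound list evicting idle, iomapped vmas to free aperture space. A sketch under the assumption that the walk runs with the appropriate vm lock held and that freed_pages accumulates the result:

	list_for_each_entry_safe(vma, next,
				 &i915->ggtt.vm.bound_list, vm_link) {
		unsigned long count = vma->node.size >> PAGE_SHIFT;

		/* Only idle vmas holding an aperture mapping are evictable. */
		if (!vma->iomap || i915_vma_is_active(vma))
			continue;

		if (i915_vma_unbind(vma) == 0)
			freed_pages += count;
	}
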
vma               618 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	struct i915_vma *vma;
vma               666 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
vma               667 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	if (IS_ERR(vma)) {
vma               668 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 		ret = PTR_ERR(vma);
vma               677 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
vma               685 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
vma               687 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	vma->pages = obj->mm.pages;
vma               688 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	vma->flags |= I915_VMA_GLOBAL_BIND;
vma               689 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	__i915_vma_set_map_and_fenceable(vma);
vma               692 drivers/gpu/drm/i915/gem/i915_gem_stolen.c 	list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
vma               159 drivers/gpu/drm/i915/gem/i915_gem_tiling.c static bool i915_vma_fence_prepare(struct i915_vma *vma,
vma               162 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 	struct drm_i915_private *i915 = vma->vm->i915;
vma               165 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 	if (!i915_vma_is_map_and_fenceable(vma))
vma               168 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 	size = i915_gem_fence_size(i915, vma->size, tiling_mode, stride);
vma               169 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 	if (vma->node.size < size)
vma               172 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 	alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride);
vma               173 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 	if (!IS_ALIGNED(vma->node.start, alignment))
vma               184 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 	struct i915_vma *vma;
vma               190 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 	for_each_ggtt_vma(vma, obj) {
vma               191 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 		if (i915_vma_fence_prepare(vma, tiling_mode, stride))
vma               194 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 		ret = i915_vma_unbind(vma);
vma               207 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 	struct i915_vma *vma;
vma               267 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 	for_each_ggtt_vma(vma, obj) {
vma               268 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 		vma->fence_size =
vma               269 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 			i915_gem_fence_size(i915, vma->size, tiling, stride);
vma               270 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 		vma->fence_alignment =
vma               272 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 						 vma->size, tiling, stride);
vma               274 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 		if (vma->fence)
vma               275 drivers/gpu/drm/i915/gem/i915_gem_tiling.c 			vma->fence->dirty = true;
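
i915_vma_fence_prepare() above decides whether an existing GGTT binding can survive a tiling change: the node must still be large enough and suitably aligned for the fence geometry the new mode demands. Reassembled from those lines (near-verbatim; only the comments are added):

static bool fence_prepare(struct i915_vma *vma,
			  int tiling_mode, unsigned int stride)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	u32 size, alignment;

	if (!i915_vma_is_map_and_fenceable(vma))
		return true;	/* no fence register involved */

	size = i915_gem_fence_size(i915, vma->size, tiling_mode, stride);
	if (vma->node.size < size)
		return false;	/* node too small for the new fence */

	alignment = i915_gem_fence_alignment(i915, vma->size,
					     tiling_mode, stride);
	if (!IS_ALIGNED(vma->node.start, alignment))
		return false;	/* node misaligned for the new fence */

	return true;
}
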
vma               331 drivers/gpu/drm/i915/gem/selftests/huge_pages.c static int igt_check_page_sizes(struct i915_vma *vma)
vma               333 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	struct drm_i915_private *i915 = vma->vm->i915;
vma               335 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	struct drm_i915_gem_object *obj = vma->obj;
vma               338 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
vma               340 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		       vma->page_sizes.sg & ~supported, supported);
vma               344 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
vma               346 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		       vma->page_sizes.gtt & ~supported, supported);
vma               350 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
vma               352 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		       vma->page_sizes.phys, obj->mm.page_sizes.phys);
vma               356 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
vma               358 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		       vma->page_sizes.sg, obj->mm.page_sizes.sg);
vma               377 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	struct i915_vma *vma;
vma               410 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
vma               411 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			if (IS_ERR(vma)) {
vma               412 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 				err = PTR_ERR(vma);
vma               416 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			err = i915_vma_pin(vma, 0, 0, PIN_USER);
vma               420 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			err = igt_check_page_sizes(vma);
vma               422 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			if (vma->page_sizes.sg != combination) {
vma               424 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 				       vma->page_sizes.sg, combination);
vma               428 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			i915_vma_unpin(vma);
vma               429 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			i915_vma_close(vma);
vma               441 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	i915_vma_close(vma);
vma               475 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		struct i915_vma *vma;
vma               495 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
vma               496 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		if (IS_ERR(vma)) {
vma               497 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			err = PTR_ERR(vma);
vma               501 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		err = i915_vma_pin(vma, 0, 0, flags);
vma               503 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			i915_vma_close(vma);
vma               508 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		err = igt_check_page_sizes(vma);
vma               510 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		if (vma->page_sizes.gtt != page_size) {
vma               512 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			       vma->page_sizes.gtt, page_size);
vma               516 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		i915_vma_unpin(vma);
vma               519 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			i915_vma_close(vma);
vma               529 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			err = i915_vma_unbind(vma);
vma               531 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 				i915_vma_close(vma);
vma               535 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			err = i915_vma_pin(vma, 0, 0, flags | offset);
vma               537 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 				i915_vma_close(vma);
vma               541 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			err = igt_check_page_sizes(vma);
vma               543 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
vma               545 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 				       vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
vma               549 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			i915_vma_unpin(vma);
vma               552 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 				i915_vma_close(vma);
vma               562 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		i915_vma_close(vma);
vma               585 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		struct i915_vma *vma;
vma               587 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
vma               588 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		if (!IS_ERR(vma))
vma               589 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			i915_vma_close(vma);
vma               612 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		struct i915_vma *vma;
vma               638 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
vma               639 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		if (IS_ERR(vma)) {
vma               640 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			err = PTR_ERR(vma);
vma               644 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		err = i915_vma_pin(vma, 0, 0, PIN_USER);
vma               648 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		err = igt_check_page_sizes(vma);
vma               650 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			i915_vma_unpin(vma);
vma               675 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		i915_vma_unpin(vma);
vma               677 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
vma               678 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			if (!IS_ALIGNED(vma->node.start,
vma               681 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 				       vma->node.start);
vma               686 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			if (!IS_ALIGNED(vma->node.size,
vma               689 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 				       vma->node.size);
vma               695 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		if (vma->page_sizes.gtt != expected_gtt) {
vma               697 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			       vma->page_sizes.gtt, expected_gtt,
vma               782 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	struct i915_vma *vma;
vma               816 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
vma               817 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			if (IS_ERR(vma)) {
vma               818 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 				err = PTR_ERR(vma);
vma               825 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			err = i915_vma_pin(vma, 0, 0, flags);
vma               829 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			err = igt_check_page_sizes(vma);
vma               833 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
vma               834 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 				if (!IS_ALIGNED(vma->node.start,
vma               837 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 					       vma->node.start);
vma               842 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 				if (!IS_ALIGNED(vma->node.size,
vma               845 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 					       vma->node.size);
vma               851 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			if (vma->page_sizes.gtt != expected_gtt) {
vma               853 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 				       vma->page_sizes.gtt, expected_gtt, i,
vma               859 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			i915_vma_unpin(vma);
vma               860 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			i915_vma_close(vma);
vma               871 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	i915_vma_unpin(vma);
vma               873 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	i915_vma_close(vma);
vma               882 drivers/gpu/drm/i915/gem/selftests/huge_pages.c static int gpu_write(struct i915_vma *vma,
vma               890 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	i915_gem_object_lock(vma->obj);
vma               891 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
vma               892 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	i915_gem_object_unlock(vma->obj);
vma               896 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	return igt_gpu_fill_dw(vma, ctx, engine, dw * sizeof(u32),
vma               897 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			       vma->size >> PAGE_SHIFT, val);
vma               940 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	struct i915_vma *vma;
vma               943 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	vma = i915_vma_instance(obj, vm, NULL);
vma               944 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	if (IS_ERR(vma))
vma               945 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		return PTR_ERR(vma);
vma               947 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	err = i915_vma_unbind(vma);
vma               951 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	err = i915_vma_pin(vma, size, 0, flags | offset);
vma               963 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	err = igt_check_page_sizes(vma);
vma               967 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	err = gpu_write(vma, ctx, engine, dword, val);
vma               980 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	i915_vma_unpin(vma);
vma               982 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	i915_vma_destroy(vma);
vma              1319 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	struct i915_vma *vma;
vma              1350 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		vma = i915_vma_instance(obj, vm, NULL);
vma              1351 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		if (IS_ERR(vma)) {
vma              1352 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			err = PTR_ERR(vma);
vma              1356 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		err = i915_vma_pin(vma, SZ_2M, 0, flags);
vma              1360 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		if (vma->page_sizes.sg < page_size) {
vma              1366 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		err = igt_check_page_sizes(vma);
vma              1370 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		if (vma->page_sizes.gtt != page_size) {
vma              1381 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 				       vma->page_sizes.gtt, page_size);
vma              1390 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE);
vma              1394 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		i915_vma_unpin(vma);
vma              1395 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		i915_vma_close(vma);
vma              1404 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	vma = i915_vma_instance(obj, vm, NULL);
vma              1405 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	if (IS_ERR(vma)) {
vma              1406 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		err = PTR_ERR(vma);
vma              1410 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	err = i915_vma_pin(vma, 0, 0, flags);
vma              1426 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf);
vma              1437 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	i915_vma_unpin(vma);
vma              1439 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	i915_vma_close(vma);
vma              1453 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	struct i915_vma *vma;
vma              1481 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	vma = i915_vma_instance(obj, vm, NULL);
vma              1482 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	if (IS_ERR(vma)) {
vma              1483 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		err = PTR_ERR(vma);
vma              1487 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
vma              1491 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	err = igt_check_page_sizes(vma);
vma              1493 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	i915_vma_unpin(vma);
vma              1495 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	i915_vma_close(vma);
vma              1512 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	struct i915_vma *vma;
vma              1531 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	vma = i915_vma_instance(obj, vm, NULL);
vma              1532 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	if (IS_ERR(vma)) {
vma              1533 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		err = PTR_ERR(vma);
vma              1537 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	err = i915_vma_pin(vma, 0, 0, flags);
vma              1546 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	err = igt_check_page_sizes(vma);
vma              1555 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf);
vma              1560 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	i915_vma_unpin(vma);
vma              1579 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	err = i915_vma_pin(vma, 0, 0, flags);
vma              1591 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	i915_vma_unpin(vma);
vma              1593 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	i915_vma_close(vma);
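
igt_check_page_sizes() at the top of the huge_pages excerpts asserts four invariants: both the sg and gtt page-size masks must be supported by the device, and the vma must agree with its backing object. Restated as a single predicate (hypothetical helper; the original reports each failure separately):

static bool check_page_sizes(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;

	return HAS_PAGE_SIZES(i915, vma->page_sizes.sg) &&
	       HAS_PAGE_SIZES(i915, vma->page_sizes.gtt) &&
	       vma->page_sizes.phys == obj->mm.page_sizes.phys &&
	       vma->page_sizes.sg == obj->mm.page_sizes.sg;
}
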
vma                79 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	struct i915_vma *vma;
vma                89 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
vma                90 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	if (IS_ERR(vma))
vma                91 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 		return PTR_ERR(vma);
vma                93 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	map = i915_vma_pin_iomap(vma);
vma                94 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	i915_vma_unpin(vma);
vma                99 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	i915_vma_unpin_iomap(vma);
vma               108 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	struct i915_vma *vma;
vma               118 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
vma               119 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	if (IS_ERR(vma))
vma               120 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 		return PTR_ERR(vma);
vma               122 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	map = i915_vma_pin_iomap(vma);
vma               123 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	i915_vma_unpin(vma);
vma               128 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	i915_vma_unpin_iomap(vma);
vma               185 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	struct i915_vma *vma;
vma               195 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
vma               196 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	if (IS_ERR(vma))
vma               197 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 		return PTR_ERR(vma);
vma               201 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 		i915_vma_unpin(vma);
vma               208 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 		i915_vma_unpin(vma);
vma               214 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 		*cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
vma               215 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 		*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
vma               220 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 		*cs++ = i915_ggtt_offset(vma) + offset;
vma               224 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 		*cs++ = i915_ggtt_offset(vma) + offset;
vma               230 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	i915_vma_lock(vma);
vma               231 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	err = i915_request_await_object(rq, vma->obj, true);
vma               233 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
vma               234 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	i915_vma_unlock(vma);
vma               235 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	i915_vma_unpin(vma);
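
The coherency tests above access objects through the mappable aperture. The ordering in those lines suggests i915_vma_pin_iomap() keeps the vma resident until i915_vma_unpin_iomap(), so the outer pin can be dropped immediately; the iowrite32 accessor and the helper name below are assumptions:

static int gtt_write_dword(struct drm_i915_gem_object *obj,
			   unsigned long offset, u32 v)
{
	struct i915_vma *vma;
	u32 __iomem *map;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);	/* the iomap keeps the vma resident */
	if (IS_ERR(map))
		return PTR_ERR(map);

	iowrite32(v, &map[offset / sizeof(*map)]);	/* assumed accessor */
	i915_vma_unpin_iomap(vma);

	return 0;
}
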
vma               175 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	struct i915_vma *vma;
vma               181 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	vma = i915_vma_instance(obj, vm, NULL);
vma               182 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	if (IS_ERR(vma))
vma               183 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		return PTR_ERR(vma);
vma               191 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
vma               203 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	err = igt_gpu_fill_dw(vma,
vma               210 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_vma_unpin(vma);
vma               579 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
vma               585 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	if (INTEL_GEN(vma->vm->i915) < 8)
vma               588 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE);
vma               600 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	*cmd++ = lower_32_bits(vma->node.start);
vma               601 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	*cmd++ = upper_32_bits(vma->node.start);
vma               607 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	vma = i915_vma_instance(obj, vma->vm, NULL);
vma               608 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	if (IS_ERR(vma)) {
vma               609 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		err = PTR_ERR(vma);
vma               613 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
vma               617 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	return vma;
vma               631 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	struct i915_vma *vma;
vma               636 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	vma = i915_vma_instance(obj, ce->vm, NULL);
vma               637 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	if (IS_ERR(vma))
vma               638 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		return PTR_ERR(vma);
vma               646 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
vma               650 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	batch = rpcs_query_batch(vma);
vma               676 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_vma_lock(vma);
vma               677 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	err = i915_request_await_object(rq, vma->obj, true);
vma               679 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
vma               680 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_vma_unlock(vma);
vma               688 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_vma_unpin(vma);
vma               704 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_vma_unpin(vma);
vma              1171 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	struct i915_vma *vma;
vma              1200 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	vma = i915_vma_instance(obj, ctx->vm, NULL);
vma              1201 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	if (IS_ERR(vma)) {
vma              1202 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		err = PTR_ERR(vma);
vma              1206 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
vma              1220 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
vma              1224 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_vma_lock(vma);
vma              1225 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	err = i915_request_await_object(rq, vma->obj, false);
vma              1227 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		err = i915_vma_move_to_active(vma, rq, 0);
vma              1228 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_vma_unlock(vma);
vma              1232 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_vma_unpin(vma);
vma              1233 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_vma_close(vma);
vma              1234 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_vma_put(vma);
vma              1245 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_vma_unpin(vma);
vma              1260 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	struct i915_vma *vma;
vma              1299 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	vma = i915_vma_instance(obj, ctx->vm, NULL);
vma              1300 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	if (IS_ERR(vma)) {
vma              1301 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		err = PTR_ERR(vma);
vma              1305 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
vma              1319 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
vma              1323 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_vma_lock(vma);
vma              1324 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	err = i915_request_await_object(rq, vma->obj, true);
vma              1326 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
vma              1327 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_vma_unlock(vma);
vma              1331 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_vma_unpin(vma);
vma              1332 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_vma_close(vma);
vma              1359 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_vma_unpin(vma);
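
Both context selftests above submit a pinned batch the same way: emit_bb_start() aimed at the vma's node, then the usual dependency tracking. A sketch assuming the request was already allocated against the right engine (helper name hypothetical):

static int submit_batch(struct i915_request *rq, struct i915_vma *vma)
{
	int err;

	/* Point the batchbuffer-start at the vma's GTT node. */
	err = rq->engine->emit_bb_start(rq, vma->node.start,
					vma->node.size, 0);
	if (err)
		return err;

	/* Record the read dependency so the batch stays resident. */
	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, 0);
	i915_vma_unlock(vma);

	return err;
}
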
vma                83 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 	struct i915_vma *vma;
vma               122 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
vma               123 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		if (IS_ERR(vma)) {
vma               125 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 			       page, (int)PTR_ERR(vma));
vma               126 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 			return PTR_ERR(vma);
vma               132 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		io = i915_vma_pin_iomap(vma);
vma               133 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		i915_vma_unpin(vma);
vma               141 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		i915_vma_unpin_iomap(vma);
vma               157 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 			       vma->size >> PAGE_SHIFT,
vma               159 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
vma               172 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		i915_vma_destroy(vma);
vma               333 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 	struct i915_vma *vma;
vma               336 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
vma               337 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 	if (IS_ERR(vma))
vma               338 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		return PTR_ERR(vma);
vma               340 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
vma               349 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 			i915_vma_unpin(vma);
vma               353 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		i915_vma_lock(vma);
vma               354 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		err = i915_request_await_object(rq, vma->obj, true);
vma               356 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 			err = i915_vma_move_to_active(vma, rq,
vma               358 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		i915_vma_unlock(vma);
vma               363 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 	i915_vma_unpin(vma);
vma                39 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c igt_emit_store_dw(struct i915_vma *vma,
vma                45 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	const int gen = INTEL_GEN(vma->vm->i915);
vma                52 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	obj = i915_gem_object_create_internal(vma->vm->i915, size);
vma                62 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
vma                63 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	offset += vma->node.start;
vma                87 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	vma = i915_vma_instance(obj, vma->vm, NULL);
vma                88 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	if (IS_ERR(vma)) {
vma                89 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 		err = PTR_ERR(vma);
vma                93 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
vma                97 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	return vma;
vma               104 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c int igt_gpu_fill_dw(struct i915_vma *vma,
vma               117 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	GEM_BUG_ON(vma->size > vm->total);
vma               119 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
vma               121 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	batch = igt_emit_store_dw(vma, offset, count, val);
vma               149 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	i915_vma_lock(vma);
vma               150 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	err = i915_request_await_object(rq, vma->obj, true);
vma               152 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
vma               153 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	i915_vma_unlock(vma);
vma                21 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h igt_emit_store_dw(struct i915_vma *vma,
vma                26 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h int igt_gpu_fill_dw(struct i915_vma *vma,
vma                93 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
vma               113 drivers/gpu/drm/i915/gt/intel_context.c static int __context_pin_state(struct i915_vma *vma)
vma               118 drivers/gpu/drm/i915/gt/intel_context.c 	flags = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
vma               121 drivers/gpu/drm/i915/gt/intel_context.c 	err = i915_vma_pin(vma, 0, 0, flags);
vma               129 drivers/gpu/drm/i915/gt/intel_context.c 	i915_vma_make_unshrinkable(vma);
vma               130 drivers/gpu/drm/i915/gt/intel_context.c 	vma->obj->mm.dirty = true;
vma               135 drivers/gpu/drm/i915/gt/intel_context.c static void __context_unpin_state(struct i915_vma *vma)
vma               137 drivers/gpu/drm/i915/gt/intel_context.c 	__i915_vma_unpin(vma);
vma               138 drivers/gpu/drm/i915/gt/intel_context.c 	i915_vma_make_shrinkable(vma);
vma               494 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	struct i915_vma *vma;
vma               499 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	vma = fetch_and_zero(&engine->status_page.vma);
vma               500 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	if (!vma)
vma               504 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		i915_vma_unpin(vma);
vma               506 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	i915_gem_object_unpin_map(vma->obj);
vma               507 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	i915_gem_object_put(vma->obj);
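The intel_engine_cs.c fragments above sketch the status-page teardown: fetch_and_zero() claims and clears the pointer in one step so cleanup runs at most once along this path, then the GGTT pin, the kernel mapping and the object reference are dropped in reverse order of setup. In outline (in the tree the unpin itself is conditional, which the lone indented i915_vma_unpin() fragment only hints at):

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;

	/* Claim the pointer once; a second call sees NULL and returns. */
	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;

	i915_vma_unpin(vma);			/* assumed: page was GGTT-pinned */
	i915_gem_object_unpin_map(vma->obj);	/* drop the CPU mapping */
	i915_gem_object_put(vma->obj);		/* drop the object reference */
}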
vma               511 drivers/gpu/drm/i915/gt/intel_engine_cs.c 				struct i915_vma *vma)
vma               532 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	return i915_vma_pin(vma, 0, 0, flags);
vma               538 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	struct i915_vma *vma;
vma               557 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
vma               558 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	if (IS_ERR(vma)) {
vma               559 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		ret = PTR_ERR(vma);
vma               570 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	engine->status_page.vma = vma;
vma               573 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		ret = pin_ggtt_status_page(engine, vma);
vma               680 drivers/gpu/drm/i915/gt/intel_engine_cs.c 				engine->status_page.vma))
vma              1293 drivers/gpu/drm/i915/gt/intel_engine_cs.c 						i915_ggtt_offset(rq->ring->vma),
vma              1305 drivers/gpu/drm/i915/gt/intel_engine_cs.c 				 i915_ggtt_offset(rq->ring->vma),
vma              1389 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			   i915_ggtt_offset(rq->ring->vma));
vma              1536 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	return ring == i915_ggtt_offset(rq->ring->vma);
vma                67 drivers/gpu/drm/i915/gt/intel_engine_types.h 	struct i915_vma *vma;
vma                89 drivers/gpu/drm/i915/gt/intel_engine_types.h 	struct i915_vma *vma;
vma               130 drivers/gpu/drm/i915/gt/intel_engine_types.h 	struct i915_vma *vma;
vma               229 drivers/gpu/drm/i915/gt/intel_gt.c 	struct i915_vma *vma;
vma               240 drivers/gpu/drm/i915/gt/intel_gt.c 	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
vma               241 drivers/gpu/drm/i915/gt/intel_gt.c 	if (IS_ERR(vma)) {
vma               242 drivers/gpu/drm/i915/gt/intel_gt.c 		ret = PTR_ERR(vma);
vma               246 drivers/gpu/drm/i915/gt/intel_gt.c 	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
vma               250 drivers/gpu/drm/i915/gt/intel_gt.c 	gt->scratch = i915_vma_make_unshrinkable(vma);
vma               246 drivers/gpu/drm/i915/gt/intel_lrc.c 	return (i915_ggtt_offset(engine->status_page.vma) +
vma              1772 drivers/gpu/drm/i915/gt/intel_lrc.c 	regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma);
vma              2217 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct i915_vma *vma;
vma              2224 drivers/gpu/drm/i915/gt/intel_lrc.c 	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
vma              2225 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (IS_ERR(vma)) {
vma              2226 drivers/gpu/drm/i915/gt/intel_lrc.c 		err = PTR_ERR(vma);
vma              2230 drivers/gpu/drm/i915/gt/intel_lrc.c 	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
vma              2234 drivers/gpu/drm/i915/gt/intel_lrc.c 	engine->wa_ctx.vma = vma;
vma              2244 drivers/gpu/drm/i915/gt/intel_lrc.c 	i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
vma              2290 drivers/gpu/drm/i915/gt/intel_lrc.c 	page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
vma              2337 drivers/gpu/drm/i915/gt/intel_lrc.c 			i915_ggtt_offset(engine->status_page.vma));
vma              3236 drivers/gpu/drm/i915/gt/intel_lrc.c 			u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
vma              3248 drivers/gpu/drm/i915/gt/intel_lrc.c 			u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
vma              3352 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct i915_vma *vma;
vma              3371 drivers/gpu/drm/i915/gt/intel_lrc.c 	vma = i915_vma_instance(ctx_obj, &engine->gt->ggtt->vm, NULL);
vma              3372 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (IS_ERR(vma)) {
vma              3373 drivers/gpu/drm/i915/gt/intel_lrc.c 		ret = PTR_ERR(vma);
vma              3402 drivers/gpu/drm/i915/gt/intel_lrc.c 	ce->state = vma;
vma                34 drivers/gpu/drm/i915/gt/intel_renderstate.c 	struct i915_vma *vma;
vma                96 drivers/gpu/drm/i915/gt/intel_renderstate.c 			u64 r = s + so->vma->node.start;
vma               118 drivers/gpu/drm/i915/gt/intel_renderstate.c 	so->batch_offset = i915_ggtt_offset(so->vma);
vma               196 drivers/gpu/drm/i915/gt/intel_renderstate.c 	so.vma = i915_vma_instance(so.obj, &engine->gt->ggtt->vm, NULL);
vma               197 drivers/gpu/drm/i915/gt/intel_renderstate.c 	if (IS_ERR(so.vma)) {
vma               198 drivers/gpu/drm/i915/gt/intel_renderstate.c 		err = PTR_ERR(so.vma);
vma               202 drivers/gpu/drm/i915/gt/intel_renderstate.c 	err = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
vma               224 drivers/gpu/drm/i915/gt/intel_renderstate.c 	i915_vma_lock(so.vma);
vma               225 drivers/gpu/drm/i915/gt/intel_renderstate.c 	err = i915_request_await_object(rq, so.vma->obj, false);
vma               227 drivers/gpu/drm/i915/gt/intel_renderstate.c 		err = i915_vma_move_to_active(so.vma, rq, 0);
vma               228 drivers/gpu/drm/i915/gt/intel_renderstate.c 	i915_vma_unlock(so.vma);
vma               230 drivers/gpu/drm/i915/gt/intel_renderstate.c 	i915_vma_unpin(so.vma);
vma               232 drivers/gpu/drm/i915/gt/intel_renderstate.c 	i915_vma_close(so.vma);
vma               629 drivers/gpu/drm/i915/gt/intel_reset.c 		struct i915_vma *vma;
vma               632 drivers/gpu/drm/i915/gt/intel_reset.c 		vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
vma               633 drivers/gpu/drm/i915/gt/intel_reset.c 		if (!vma)
vma               636 drivers/gpu/drm/i915/gt/intel_reset.c 		if (!i915_vma_has_userfault(vma))
vma               639 drivers/gpu/drm/i915/gt/intel_reset.c 		GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
vma               640 drivers/gpu/drm/i915/gt/intel_reset.c 		node = &vma->obj->base.vma_node;
vma               641 drivers/gpu/drm/i915/gt/intel_reset.c 		vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
vma               644 drivers/gpu/drm/i915/gt/intel_reset.c 				    vma->size,
vma               442 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
vma               462 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
vma               519 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	struct drm_i915_gem_object *obj = engine->status_page.vma->obj;
vma               595 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
vma               689 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));
vma               716 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 			  i915_ggtt_offset(ring->vma));
vma               940 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
vma               963 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
vma              1189 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	struct i915_vma *vma = ring->vma;
vma              1200 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
vma              1202 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	if (vma->obj->stolen)
vma              1207 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ret = i915_vma_pin(vma, 0, 0, flags);
vma              1211 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	if (i915_vma_is_map_and_fenceable(vma))
vma              1212 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		addr = (void __force *)i915_vma_pin_iomap(vma);
vma              1214 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		addr = i915_gem_object_pin_map(vma->obj,
vma              1215 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 					       i915_coherent_map_type(vma->vm->i915));
vma              1221 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	i915_vma_make_unshrinkable(vma);
vma              1229 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	i915_vma_unpin(vma);
vma              1246 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	struct i915_vma *vma = ring->vma;
vma              1254 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	i915_vma_unset_ggtt_write(vma);
vma              1255 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	if (i915_vma_is_map_and_fenceable(vma))
vma              1256 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		i915_vma_unpin_iomap(vma);
vma              1258 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		i915_gem_object_unpin_map(vma->obj);
vma              1263 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	i915_vma_unpin(vma);
vma              1264 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	i915_vma_make_purgeable(vma);
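The intel_ringbuffer.c pin/unpin entries above show how the ring mapping is chosen: a map-and-fenceable GGTT vma is mapped through the mappable aperture with i915_vma_pin_iomap(), anything else gets a direct CPU mapping of the backing pages via i915_gem_object_pin_map() with a coherency-appropriate map type, and unpinning mirrors the same test. Condensed into one helper, with error handling reduced to the essentials:

static void *map_ring(struct i915_vma *vma)
{
	void *addr;

	if (i915_vma_is_map_and_fenceable(vma))
		/* Aperture access is I/O memory, hence the __force cast. */
		addr = (void __force *)i915_vma_pin_iomap(vma);
	else
		addr = i915_gem_object_pin_map(vma->obj,
					       i915_coherent_map_type(vma->vm->i915));

	return addr;	/* may be an ERR_PTR; the caller must check */
}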
vma              1272 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	struct i915_vma *vma;
vma              1287 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	vma = i915_vma_instance(obj, vm, NULL);
vma              1288 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	if (IS_ERR(vma))
vma              1291 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	return vma;
vma              1295 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	return vma;
vma              1303 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	struct i915_vma *vma;
vma              1327 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	vma = create_ring_vma(engine->gt->ggtt, size);
vma              1328 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	if (IS_ERR(vma)) {
vma              1330 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		return ERR_CAST(vma);
vma              1332 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ring->vma = vma;
vma              1341 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	i915_vma_close(ring->vma);
vma              1342 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	i915_vma_put(ring->vma);
vma              1407 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	struct i915_vma *vma;
vma              1455 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
vma              1456 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	if (IS_ERR(vma)) {
vma              1457 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		err = PTR_ERR(vma);
vma              1461 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	return vma;
vma              1481 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		struct i915_vma *vma;
vma              1483 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		vma = alloc_context_vma(engine);
vma              1484 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		if (IS_ERR(vma))
vma              1485 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 			return PTR_ERR(vma);
vma              1487 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		ce->state = vma;
vma              2343 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	timeline = intel_timeline_create(engine->gt, engine->status_page.vma);
vma              2372 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
vma                22 drivers/gpu/drm/i915/gt/intel_timeline.c 	struct i915_vma *vma;
vma                38 drivers/gpu/drm/i915/gt/intel_timeline.c 	struct i915_vma *vma;
vma                46 drivers/gpu/drm/i915/gt/intel_timeline.c 	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
vma                47 drivers/gpu/drm/i915/gt/intel_timeline.c 	if (IS_ERR(vma))
vma                50 drivers/gpu/drm/i915/gt/intel_timeline.c 	return vma;
vma                67 drivers/gpu/drm/i915/gt/intel_timeline.c 		struct i915_vma *vma;
vma                75 drivers/gpu/drm/i915/gt/intel_timeline.c 		vma = __hwsp_alloc(timeline->gt);
vma                76 drivers/gpu/drm/i915/gt/intel_timeline.c 		if (IS_ERR(vma)) {
vma                78 drivers/gpu/drm/i915/gt/intel_timeline.c 			return vma;
vma                81 drivers/gpu/drm/i915/gt/intel_timeline.c 		vma->private = hwsp;
vma                83 drivers/gpu/drm/i915/gt/intel_timeline.c 		hwsp->vma = vma;
vma                99 drivers/gpu/drm/i915/gt/intel_timeline.c 	GEM_BUG_ON(hwsp->vma->private != hwsp);
vma               100 drivers/gpu/drm/i915/gt/intel_timeline.c 	return hwsp->vma;
vma               119 drivers/gpu/drm/i915/gt/intel_timeline.c 		i915_vma_put(hwsp->vma);
vma               131 drivers/gpu/drm/i915/gt/intel_timeline.c 	i915_gem_object_unpin_map(cl->hwsp->vma->obj);
vma               132 drivers/gpu/drm/i915/gt/intel_timeline.c 	i915_vma_put(cl->hwsp->vma);
vma               144 drivers/gpu/drm/i915/gt/intel_timeline.c 	i915_vma_unpin(cl->hwsp->vma);
vma               154 drivers/gpu/drm/i915/gt/intel_timeline.c 	__i915_vma_pin(cl->hwsp->vma);
vma               170 drivers/gpu/drm/i915/gt/intel_timeline.c 	vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB);
vma               176 drivers/gpu/drm/i915/gt/intel_timeline.c 	i915_vma_get(hwsp->vma);
vma               397 drivers/gpu/drm/i915/gt/intel_timeline.c 	struct i915_vma *vma;
vma               420 drivers/gpu/drm/i915/gt/intel_timeline.c 	vma = hwsp_alloc(tl, &cacheline);
vma               421 drivers/gpu/drm/i915/gt/intel_timeline.c 	if (IS_ERR(vma)) {
vma               422 drivers/gpu/drm/i915/gt/intel_timeline.c 		err = PTR_ERR(vma);
vma               426 drivers/gpu/drm/i915/gt/intel_timeline.c 	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
vma               428 drivers/gpu/drm/i915/gt/intel_timeline.c 		__idle_hwsp_free(vma->private, cacheline);
vma               432 drivers/gpu/drm/i915/gt/intel_timeline.c 	cl = cacheline_alloc(vma->private, cacheline);
vma               435 drivers/gpu/drm/i915/gt/intel_timeline.c 		__idle_hwsp_free(vma->private, cacheline);
vma               438 drivers/gpu/drm/i915/gt/intel_timeline.c 	GEM_BUG_ON(cl->hwsp->vma != vma);
vma               455 drivers/gpu/drm/i915/gt/intel_timeline.c 	tl->hwsp_ggtt = i915_vma_get(vma);
vma               462 drivers/gpu/drm/i915/gt/intel_timeline.c 	tl->hwsp_offset += i915_ggtt_offset(vma);
vma               474 drivers/gpu/drm/i915/gt/intel_timeline.c 	i915_vma_unpin(vma);
vma               517 drivers/gpu/drm/i915/gt/intel_timeline.c 			*hwsp = i915_ggtt_offset(cl->hwsp->vma) +
vma              1419 drivers/gpu/drm/i915/gt/intel_workarounds.c 	struct i915_vma *vma;
vma              1430 drivers/gpu/drm/i915/gt/intel_workarounds.c 	vma = i915_vma_instance(obj, vm, NULL);
vma              1431 drivers/gpu/drm/i915/gt/intel_workarounds.c 	if (IS_ERR(vma)) {
vma              1432 drivers/gpu/drm/i915/gt/intel_workarounds.c 		err = PTR_ERR(vma);
vma              1436 drivers/gpu/drm/i915/gt/intel_workarounds.c 	err = i915_vma_pin(vma, 0, 0,
vma              1437 drivers/gpu/drm/i915/gt/intel_workarounds.c 			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
vma              1441 drivers/gpu/drm/i915/gt/intel_workarounds.c 	return vma;
vma              1464 drivers/gpu/drm/i915/gt/intel_workarounds.c 	    struct i915_vma *vma)
vma              1492 drivers/gpu/drm/i915/gt/intel_workarounds.c 		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
vma              1506 drivers/gpu/drm/i915/gt/intel_workarounds.c 	struct i915_vma *vma;
vma              1514 drivers/gpu/drm/i915/gt/intel_workarounds.c 	vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count);
vma              1515 drivers/gpu/drm/i915/gt/intel_workarounds.c 	if (IS_ERR(vma))
vma              1516 drivers/gpu/drm/i915/gt/intel_workarounds.c 		return PTR_ERR(vma);
vma              1524 drivers/gpu/drm/i915/gt/intel_workarounds.c 	err = wa_list_srm(rq, wal, vma);
vma              1534 drivers/gpu/drm/i915/gt/intel_workarounds.c 	results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
vma              1549 drivers/gpu/drm/i915/gt/intel_workarounds.c 	i915_gem_object_unpin_map(vma->obj);
vma              1552 drivers/gpu/drm/i915/gt/intel_workarounds.c 	i915_vma_unpin(vma);
vma              1553 drivers/gpu/drm/i915/gt/intel_workarounds.c 	i915_vma_put(vma);
vma               114 drivers/gpu/drm/i915/gt/selftest_hangcheck.c static int move_to_active(struct i915_vma *vma,
vma               120 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	i915_vma_lock(vma);
vma               121 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	err = i915_request_await_object(rq, vma->obj,
vma               124 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		err = i915_vma_move_to_active(vma, rq, flags);
vma               125 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	i915_vma_unlock(vma);
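The move_to_active() fragments from selftest_hangcheck.c above spell out the standard request-tracking sequence: take the vma lock, order the new request after prior users of the object with i915_request_await_object(), mark the vma busy on the request with i915_vma_move_to_active(), then unlock. Reassembled into one piece (the write-flag derivation is inferred from the surrounding fragments rather than copied verbatim):

static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	/* Serialise against, or pipeline after, earlier users of the object. */
	err = i915_request_await_object(rq, vma->obj,
					flags & EXEC_OBJECT_WRITE);
	if (err == 0)
		/* Keep the vma alive and resident until rq retires. */
		err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}

The same lock/await/move/unlock shape recurs in the i915_gem_context.c, i915_gem_mman.c, igt_gem_utils.c, intel_renderstate.c and selftest_workarounds.c entries elsewhere in this listing.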
vma               137 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	struct i915_vma *hws, *vma;
vma               159 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	vma = i915_vma_instance(h->obj, vm, NULL);
vma               160 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	if (IS_ERR(vma))
vma               161 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		return ERR_CAST(vma);
vma               167 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
vma               181 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	err = move_to_active(vma, rq, 0);
vma               202 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		*batch++ = lower_32_bits(vma->node.start);
vma               203 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		*batch++ = upper_32_bits(vma->node.start);
vma               216 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		*batch++ = lower_32_bits(vma->node.start);
vma               229 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		*batch++ = lower_32_bits(vma->node.start);
vma               241 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		*batch++ = lower_32_bits(vma->node.start);
vma               256 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
vma               266 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	i915_vma_unpin(vma);
vma              1123 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	struct i915_vma *vma;
vma              1129 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	struct i915_address_space *vm = arg->vma->vm;
vma              1131 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	struct drm_mm_node evict = arg->vma->node;
vma              1146 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	struct drm_i915_private *i915 = arg->vma->vm->i915;
vma              1154 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	err = i915_gem_object_set_tiling(arg->vma->obj, I915_TILING_Y, 512);
vma              1160 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	err = i915_vma_pin(arg->vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
vma              1166 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	err = i915_vma_pin_fence(arg->vma);
vma              1167 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	i915_vma_unpin(arg->vma);
vma              1173 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	i915_vma_unpin_fence(arg->vma);
vma              1218 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	arg.vma = i915_vma_instance(obj, vm, NULL);
vma              1219 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	if (IS_ERR(arg.vma)) {
vma              1220 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		err = PTR_ERR(arg.vma);
vma              1230 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	err = i915_vma_pin(arg.vma, 0, 0,
vma              1231 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			   i915_vma_is_ggtt(arg.vma) ?
vma              1240 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		err = i915_vma_pin_fence(arg.vma);
vma              1243 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			i915_vma_unpin(arg.vma);
vma              1249 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	i915_vma_lock(arg.vma);
vma              1250 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	err = i915_request_await_object(rq, arg.vma->obj,
vma              1253 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		err = i915_vma_move_to_active(arg.vma, rq, flags);
vma              1254 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	i915_vma_unlock(arg.vma);
vma              1257 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		i915_vma_unpin_fence(arg.vma);
vma              1258 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	i915_vma_unpin(arg.vma);
vma                83 drivers/gpu/drm/i915/gt/selftest_lrc.c emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
vma                98 drivers/gpu/drm/i915/gt/selftest_lrc.c 	*cs++ = i915_ggtt_offset(vma) + 4 * idx;
vma               103 drivers/gpu/drm/i915/gt/selftest_lrc.c 		*cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
vma               120 drivers/gpu/drm/i915/gt/selftest_lrc.c semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
vma               134 drivers/gpu/drm/i915/gt/selftest_lrc.c 	err = emit_semaphore_chain(rq, vma, idx);
vma               146 drivers/gpu/drm/i915/gt/selftest_lrc.c 	      struct i915_vma *vma,
vma               166 drivers/gpu/drm/i915/gt/selftest_lrc.c 	*cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
vma               180 drivers/gpu/drm/i915/gt/selftest_lrc.c 		      struct i915_vma *vma,
vma               188 drivers/gpu/drm/i915/gt/selftest_lrc.c 	head = semaphore_queue(outer, vma, n++);
vma               197 drivers/gpu/drm/i915/gt/selftest_lrc.c 			rq = semaphore_queue(engine, vma, n++);
vma               205 drivers/gpu/drm/i915/gt/selftest_lrc.c 	err = release_queue(outer, vma, n);
vma               229 drivers/gpu/drm/i915/gt/selftest_lrc.c 	struct i915_vma *vma;
vma               252 drivers/gpu/drm/i915/gt/selftest_lrc.c 	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
vma               253 drivers/gpu/drm/i915/gt/selftest_lrc.c 	if (IS_ERR(vma)) {
vma               254 drivers/gpu/drm/i915/gt/selftest_lrc.c 		err = PTR_ERR(vma);
vma               264 drivers/gpu/drm/i915/gt/selftest_lrc.c 	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
vma               278 drivers/gpu/drm/i915/gt/selftest_lrc.c 			err = slice_semaphore_queue(engine, vma, count);
vma               290 drivers/gpu/drm/i915/gt/selftest_lrc.c 	i915_vma_unpin(vma);
vma               308 drivers/gpu/drm/i915/gt/selftest_lrc.c 	struct i915_vma *vma;
vma               346 drivers/gpu/drm/i915/gt/selftest_lrc.c 	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
vma               347 drivers/gpu/drm/i915/gt/selftest_lrc.c 	if (IS_ERR(vma)) {
vma               348 drivers/gpu/drm/i915/gt/selftest_lrc.c 		err = PTR_ERR(vma);
vma               352 drivers/gpu/drm/i915/gt/selftest_lrc.c 	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
vma               395 drivers/gpu/drm/i915/gt/selftest_lrc.c 		*cs++ = i915_ggtt_offset(vma);
vma               406 drivers/gpu/drm/i915/gt/selftest_lrc.c 		*cs++ = i915_ggtt_offset(vma);
vma               439 drivers/gpu/drm/i915/gt/selftest_lrc.c 		*cs++ = i915_ggtt_offset(vma);
vma               469 drivers/gpu/drm/i915/gt/selftest_lrc.c 	i915_vma_unpin(vma);
vma              1439 drivers/gpu/drm/i915/gt/selftest_lrc.c 	struct i915_vma *vma = NULL;
vma              1443 drivers/gpu/drm/i915/gt/selftest_lrc.c 		vma = i915_vma_instance(batch, ctx->vm, NULL);
vma              1444 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (IS_ERR(vma))
vma              1445 drivers/gpu/drm/i915/gt/selftest_lrc.c 			return PTR_ERR(vma);
vma              1447 drivers/gpu/drm/i915/gt/selftest_lrc.c 		err = i915_vma_pin(vma, 0, 0, PIN_USER);
vma              1460 drivers/gpu/drm/i915/gt/selftest_lrc.c 	if (vma) {
vma              1461 drivers/gpu/drm/i915/gt/selftest_lrc.c 		i915_vma_lock(vma);
vma              1462 drivers/gpu/drm/i915/gt/selftest_lrc.c 		err = i915_request_await_object(rq, vma->obj, false);
vma              1464 drivers/gpu/drm/i915/gt/selftest_lrc.c 			err = i915_vma_move_to_active(vma, rq, 0);
vma              1467 drivers/gpu/drm/i915/gt/selftest_lrc.c 							vma->node.start,
vma              1469 drivers/gpu/drm/i915/gt/selftest_lrc.c 		i915_vma_unlock(vma);
vma              1475 drivers/gpu/drm/i915/gt/selftest_lrc.c 	if (vma)
vma              1476 drivers/gpu/drm/i915/gt/selftest_lrc.c 		i915_vma_unpin(vma);
vma                79 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	struct i915_vma *vma;
vma                99 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
vma               100 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	if (IS_ERR(vma)) {
vma               101 drivers/gpu/drm/i915/gt/selftest_workarounds.c 		err = PTR_ERR(vma);
vma               105 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
vma               115 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	i915_vma_lock(vma);
vma               116 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	err = i915_request_await_object(rq, vma->obj, true);
vma               118 drivers/gpu/drm/i915/gt/selftest_workarounds.c 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
vma               119 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	i915_vma_unlock(vma);
vma               136 drivers/gpu/drm/i915/gt/selftest_workarounds.c 		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
vma               142 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	i915_vma_unpin(vma);
vma               149 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	i915_vma_unpin(vma);
vma               358 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	struct i915_vma *vma;
vma               365 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	vma = i915_vma_instance(obj, ctx->vm, NULL);
vma               366 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	if (IS_ERR(vma)) {
vma               367 drivers/gpu/drm/i915/gt/selftest_workarounds.c 		err = PTR_ERR(vma);
vma               371 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
vma               375 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	return vma;
vma                87 drivers/gpu/drm/i915/gt/uc/intel_guc.c 	struct i915_vma *vma;
vma                90 drivers/gpu/drm/i915/gt/uc/intel_guc.c 	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
vma                91 drivers/gpu/drm/i915/gt/uc/intel_guc.c 	if (IS_ERR(vma))
vma                92 drivers/gpu/drm/i915/gt/uc/intel_guc.c 		return PTR_ERR(vma);
vma                94 drivers/gpu/drm/i915/gt/uc/intel_guc.c 	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
vma                96 drivers/gpu/drm/i915/gt/uc/intel_guc.c 		i915_vma_unpin_and_release(&vma, 0);
vma               100 drivers/gpu/drm/i915/gt/uc/intel_guc.c 	guc->shared_data = vma;
vma               154 drivers/gpu/drm/i915/gt/uc/intel_guc.c 	u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
vma               595 drivers/gpu/drm/i915/gt/uc/intel_guc.c 	struct i915_vma *vma;
vma               603 drivers/gpu/drm/i915/gt/uc/intel_guc.c 	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
vma               604 drivers/gpu/drm/i915/gt/uc/intel_guc.c 	if (IS_ERR(vma))
vma               607 drivers/gpu/drm/i915/gt/uc/intel_guc.c 	flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
vma               608 drivers/gpu/drm/i915/gt/uc/intel_guc.c 	ret = i915_vma_pin(vma, 0, 0, flags);
vma               610 drivers/gpu/drm/i915/gt/uc/intel_guc.c 		vma = ERR_PTR(ret);
vma               614 drivers/gpu/drm/i915/gt/uc/intel_guc.c 	return i915_vma_make_unshrinkable(vma);
vma               618 drivers/gpu/drm/i915/gt/uc/intel_guc.c 	return vma;
vma               126 drivers/gpu/drm/i915/gt/uc/intel_guc.h 					struct i915_vma *vma)
vma               128 drivers/gpu/drm/i915/gt/uc/intel_guc.h 	u32 offset = i915_ggtt_offset(vma);
vma               130 drivers/gpu/drm/i915/gt/uc/intel_guc.h 	GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
vma               131 drivers/gpu/drm/i915/gt/uc/intel_guc.h 	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
vma               138 drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c 	struct i915_vma *vma;
vma               144 drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c 	vma = intel_guc_allocate_vma(guc, size);
vma               145 drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c 	if (IS_ERR(vma))
vma               146 drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c 		return PTR_ERR(vma);
vma               148 drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c 	blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
vma               154 drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c 	guc->ads_vma = vma;
vma               128 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 	struct i915_vma *vma;
vma               133 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 	GEM_BUG_ON(ctch->vma);
vma               158 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
vma               159 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 	if (IS_ERR(vma)) {
vma               160 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 		err = PTR_ERR(vma);
vma               163 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 	ctch->vma = vma;
vma               166 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 	blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
vma               172 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 			intel_guc_ggtt_offset(guc, ctch->vma));
vma               184 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 	i915_vma_unpin_and_release(&ctch->vma, 0);
vma               196 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 	i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP);
vma               206 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 	GEM_BUG_ON(!ctch->vma);
vma               211 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 	base = intel_guc_ggtt_offset(guc, ctch->vma);
vma               848 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 	GEM_BUG_ON(!ctch->vma);
vma                50 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h 	struct i915_vma *vma;
vma               157 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c 	relay_reserve(log->relay.channel, log->vma->obj->base.size);
vma               335 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c 	if (!log->vma)
vma               343 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c 	vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC);
vma               356 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c 	i915_gem_object_unpin_map(log->vma->obj);
vma               375 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c 	GEM_BUG_ON(!log->vma);
vma               378 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c 	subbuf_size = log->vma->size;
vma               455 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c 	struct i915_vma *vma;
vma               459 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c 	GEM_BUG_ON(log->vma);
vma               483 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c 	vma = intel_guc_allocate_vma(guc, guc_log_size);
vma               484 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c 	if (IS_ERR(vma)) {
vma               485 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c 		ret = PTR_ERR(vma);
vma               489 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c 	log->vma = vma;
vma               506 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c 	i915_vma_unpin_and_release(&log->vma, 0);
vma               517 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c 	GEM_BUG_ON(!log->vma);
vma               558 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c 	if (!log->vma)
vma                47 drivers/gpu/drm/i915/gt/uc/intel_guc_log.h 	struct i915_vma *vma;
vma               313 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	struct i915_vma *vma;
vma               316 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	vma = intel_guc_allocate_vma(guc,
vma               319 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	if (IS_ERR(vma))
vma               320 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		return PTR_ERR(vma);
vma               322 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
vma               324 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		i915_vma_unpin_and_release(&vma, 0);
vma               328 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	guc->stage_desc_pool = vma;
vma               369 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	gfx_addr = intel_guc_ggtt_offset(guc, client->vma);
vma               370 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
vma               482 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c static void flush_ggtt_writes(struct i915_vma *vma)
vma               484 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	struct drm_i915_private *i915 = vma->vm->i915;
vma               486 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	if (i915_vma_is_map_and_fenceable(vma))
vma               502 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		flush_ggtt_writes(rq->ring->vma);
vma               801 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	struct i915_vma *vma;
vma               822 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
vma               823 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	if (IS_ERR(vma)) {
vma               824 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		ret = PTR_ERR(vma);
vma               829 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	client->vma = vma;
vma               831 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
vma               862 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	i915_gem_object_unpin_map(client->vma->obj);
vma               864 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	i915_vma_unpin_and_release(&client->vma, 0);
vma               875 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP);
vma                40 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h 	struct i915_vma *vma;
vma                33 drivers/gpu/drm/i915/gt/uc/intel_huc.c 	struct i915_vma *vma;
vma                53 drivers/gpu/drm/i915/gt/uc/intel_huc.c 	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
vma                54 drivers/gpu/drm/i915/gt/uc/intel_huc.c 	if (IS_ERR(vma))
vma                55 drivers/gpu/drm/i915/gt/uc/intel_huc.c 		return PTR_ERR(vma);
vma                57 drivers/gpu/drm/i915/gt/uc/intel_huc.c 	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
vma                59 drivers/gpu/drm/i915/gt/uc/intel_huc.c 		i915_vma_unpin_and_release(&vma, 0);
vma                63 drivers/gpu/drm/i915/gt/uc/intel_huc.c 	copied = intel_uc_fw_copy_rsa(&huc->fw, vaddr, vma->size);
vma                66 drivers/gpu/drm/i915/gt/uc/intel_huc.c 	i915_gem_object_unpin_map(vma->obj);
vma                68 drivers/gpu/drm/i915/gt/uc/intel_huc.c 	huc->rsa_data = vma;
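intel_guc.c and intel_huc.c above repeat one allocation sequence: intel_guc_allocate_vma() returns a GGTT vma set up for GuC use, i915_gem_object_pin_map() provides a write-back CPU pointer into it, and on failure i915_vma_unpin_and_release() unwinds the pin and the reference in a single call. As a hedged helper (the name and out-parameters are invented for illustration):

static int guc_alloc_and_map(struct intel_guc *guc, u32 size,
			     struct i915_vma **out_vma, void **out_vaddr)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc, size);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		/* Drops the pin and the vma reference in one step. */
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	*out_vma = vma;
	*out_vaddr = vaddr;
	return 0;
}

The I915_VMA_RELEASE_MAP variant seen in the intel_guc_ct.c and intel_guc_submission.c entries additionally unpins the CPU mapping during release.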
vma               114 drivers/gpu/drm/i915/gt/uc/intel_uc.c 	if (guc->log.vma && !uc->load_err_log)
vma               115 drivers/gpu/drm/i915/gt/uc/intel_uc.c 		uc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
vma              1146 drivers/gpu/drm/i915/gvt/kvmgt.c static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
vma              1154 drivers/gpu/drm/i915/gvt/kvmgt.c 	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
vma              1158 drivers/gpu/drm/i915/gvt/kvmgt.c 	if (vma->vm_end < vma->vm_start)
vma              1160 drivers/gpu/drm/i915/gvt/kvmgt.c 	if ((vma->vm_flags & VM_SHARED) == 0)
vma              1165 drivers/gpu/drm/i915/gvt/kvmgt.c 	pg_prot = vma->vm_page_prot;
vma              1166 drivers/gpu/drm/i915/gvt/kvmgt.c 	virtaddr = vma->vm_start;
vma              1167 drivers/gpu/drm/i915/gvt/kvmgt.c 	req_size = vma->vm_end - vma->vm_start;
vma              1168 drivers/gpu/drm/i915/gvt/kvmgt.c 	pgoff = vma->vm_pgoff &
vma              1180 drivers/gpu/drm/i915/gvt/kvmgt.c 	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
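The kvmgt.c fragments above show the sanity checks a device mmap handler applies to the vm_area_struct before handing it to remap_pfn_range(): a non-inverted range, a shared mapping, and a vm_pgoff that selects where in the resource the mapping starts. Boiled down to the generic shape (a plain file_operations-style handler; the mdev index/BAR bookkeeping kvmgt layers on top is omitted):

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long req_size = vma->vm_end - vma->vm_start;

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	/* Map req_size bytes of physical pages starting at vm_pgoff. */
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_size, vma->vm_page_prot);
}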
vma               490 drivers/gpu/drm/i915/gvt/scheduler.c 			bb->vma = i915_gem_object_ggtt_pin(bb->obj,
vma               492 drivers/gpu/drm/i915/gvt/scheduler.c 			if (IS_ERR(bb->vma)) {
vma               493 drivers/gpu/drm/i915/gvt/scheduler.c 				ret = PTR_ERR(bb->vma);
vma               498 drivers/gpu/drm/i915/gvt/scheduler.c 			bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
vma               514 drivers/gpu/drm/i915/gvt/scheduler.c 			ret = i915_vma_move_to_active(bb->vma,
vma               548 drivers/gpu/drm/i915/gvt/scheduler.c 	struct i915_vma *vma;
vma               556 drivers/gpu/drm/i915/gvt/scheduler.c 	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
vma               558 drivers/gpu/drm/i915/gvt/scheduler.c 	if (IS_ERR(vma))
vma               559 drivers/gpu/drm/i915/gvt/scheduler.c 		return PTR_ERR(vma);
vma               566 drivers/gpu/drm/i915/gvt/scheduler.c 	wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
vma               607 drivers/gpu/drm/i915/gvt/scheduler.c 			if (bb->vma && !IS_ERR(bb->vma)) {
vma               608 drivers/gpu/drm/i915/gvt/scheduler.c 				i915_vma_unpin(bb->vma);
vma               609 drivers/gpu/drm/i915/gvt/scheduler.c 				i915_vma_close(bb->vma);
vma               123 drivers/gpu/drm/i915/gvt/scheduler.h 	struct i915_vma *vma;
vma               140 drivers/gpu/drm/i915/i915_debugfs.c 	struct i915_vma *vma;
vma               158 drivers/gpu/drm/i915/i915_debugfs.c 	spin_lock(&obj->vma.lock);
vma               159 drivers/gpu/drm/i915/i915_debugfs.c 	list_for_each_entry(vma, &obj->vma.list, obj_link) {
vma               160 drivers/gpu/drm/i915/i915_debugfs.c 		if (!drm_mm_node_allocated(&vma->node))
vma               163 drivers/gpu/drm/i915/i915_debugfs.c 		spin_unlock(&obj->vma.lock);
vma               165 drivers/gpu/drm/i915/i915_debugfs.c 		if (i915_vma_is_pinned(vma))
vma               169 drivers/gpu/drm/i915/i915_debugfs.c 			   i915_vma_is_ggtt(vma) ? "g" : "pp",
vma               170 drivers/gpu/drm/i915/i915_debugfs.c 			   vma->node.start, vma->node.size,
vma               171 drivers/gpu/drm/i915/i915_debugfs.c 			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
vma               172 drivers/gpu/drm/i915/i915_debugfs.c 		if (i915_vma_is_ggtt(vma)) {
vma               173 drivers/gpu/drm/i915/i915_debugfs.c 			switch (vma->ggtt_view.type) {
vma               180 drivers/gpu/drm/i915/i915_debugfs.c 					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
vma               181 drivers/gpu/drm/i915/i915_debugfs.c 					   vma->ggtt_view.partial.size << PAGE_SHIFT);
vma               186 drivers/gpu/drm/i915/i915_debugfs.c 					   vma->ggtt_view.rotated.plane[0].width,
vma               187 drivers/gpu/drm/i915/i915_debugfs.c 					   vma->ggtt_view.rotated.plane[0].height,
vma               188 drivers/gpu/drm/i915/i915_debugfs.c 					   vma->ggtt_view.rotated.plane[0].stride,
vma               189 drivers/gpu/drm/i915/i915_debugfs.c 					   vma->ggtt_view.rotated.plane[0].offset,
vma               190 drivers/gpu/drm/i915/i915_debugfs.c 					   vma->ggtt_view.rotated.plane[1].width,
vma               191 drivers/gpu/drm/i915/i915_debugfs.c 					   vma->ggtt_view.rotated.plane[1].height,
vma               192 drivers/gpu/drm/i915/i915_debugfs.c 					   vma->ggtt_view.rotated.plane[1].stride,
vma               193 drivers/gpu/drm/i915/i915_debugfs.c 					   vma->ggtt_view.rotated.plane[1].offset);
vma               198 drivers/gpu/drm/i915/i915_debugfs.c 					   vma->ggtt_view.remapped.plane[0].width,
vma               199 drivers/gpu/drm/i915/i915_debugfs.c 					   vma->ggtt_view.remapped.plane[0].height,
vma               200 drivers/gpu/drm/i915/i915_debugfs.c 					   vma->ggtt_view.remapped.plane[0].stride,
vma               201 drivers/gpu/drm/i915/i915_debugfs.c 					   vma->ggtt_view.remapped.plane[0].offset,
vma               202 drivers/gpu/drm/i915/i915_debugfs.c 					   vma->ggtt_view.remapped.plane[1].width,
vma               203 drivers/gpu/drm/i915/i915_debugfs.c 					   vma->ggtt_view.remapped.plane[1].height,
vma               204 drivers/gpu/drm/i915/i915_debugfs.c 					   vma->ggtt_view.remapped.plane[1].stride,
vma               205 drivers/gpu/drm/i915/i915_debugfs.c 					   vma->ggtt_view.remapped.plane[1].offset);
vma               209 drivers/gpu/drm/i915/i915_debugfs.c 				MISSING_CASE(vma->ggtt_view.type);
vma               213 drivers/gpu/drm/i915/i915_debugfs.c 		if (vma->fence)
vma               214 drivers/gpu/drm/i915/i915_debugfs.c 			seq_printf(m, " , fence: %d", vma->fence->id);
vma               217 drivers/gpu/drm/i915/i915_debugfs.c 		spin_lock(&obj->vma.lock);
vma               219 drivers/gpu/drm/i915/i915_debugfs.c 	spin_unlock(&obj->vma.lock);
vma               244 drivers/gpu/drm/i915/i915_debugfs.c 	struct i915_vma *vma;
vma               251 drivers/gpu/drm/i915/i915_debugfs.c 	spin_lock(&obj->vma.lock);
vma               253 drivers/gpu/drm/i915/i915_debugfs.c 		for_each_ggtt_vma(vma, obj) {
vma               254 drivers/gpu/drm/i915/i915_debugfs.c 			if (!drm_mm_node_allocated(&vma->node))
vma               257 drivers/gpu/drm/i915/i915_debugfs.c 			if (i915_vma_is_active(vma))
vma               258 drivers/gpu/drm/i915/i915_debugfs.c 				stats->active += vma->node.size;
vma               260 drivers/gpu/drm/i915/i915_debugfs.c 				stats->inactive += vma->node.size;
vma               262 drivers/gpu/drm/i915/i915_debugfs.c 			if (i915_vma_is_closed(vma))
vma               263 drivers/gpu/drm/i915/i915_debugfs.c 				stats->closed += vma->node.size;
vma               266 drivers/gpu/drm/i915/i915_debugfs.c 		struct rb_node *p = obj->vma.tree.rb_node;
vma               271 drivers/gpu/drm/i915/i915_debugfs.c 			vma = rb_entry(p, typeof(*vma), obj_node);
vma               272 drivers/gpu/drm/i915/i915_debugfs.c 			cmp = i915_vma_compare(vma, stats->vm, NULL);
vma               274 drivers/gpu/drm/i915/i915_debugfs.c 				if (drm_mm_node_allocated(&vma->node)) {
vma               275 drivers/gpu/drm/i915/i915_debugfs.c 					if (i915_vma_is_active(vma))
vma               276 drivers/gpu/drm/i915/i915_debugfs.c 						stats->active += vma->node.size;
vma               278 drivers/gpu/drm/i915/i915_debugfs.c 						stats->inactive += vma->node.size;
vma               280 drivers/gpu/drm/i915/i915_debugfs.c 					if (i915_vma_is_closed(vma))
vma               281 drivers/gpu/drm/i915/i915_debugfs.c 						stats->closed += vma->node.size;
vma               291 drivers/gpu/drm/i915/i915_debugfs.c 	spin_unlock(&obj->vma.lock);
vma               325 drivers/gpu/drm/i915/i915_debugfs.c 				per_file_stats(0, ce->ring->vma->obj, &kstats);
vma               656 drivers/gpu/drm/i915/i915_debugfs.c 		struct i915_vma *vma = reg->vma;
vma               660 drivers/gpu/drm/i915/i915_debugfs.c 		if (!vma)
vma               663 drivers/gpu/drm/i915/i915_debugfs.c 			describe_obj(m, vma->obj);
vma              1979 drivers/gpu/drm/i915/i915_debugfs.c 	else if (dev_priv->gt.uc.guc.log.vma)
vma              1980 drivers/gpu/drm/i915/i915_debugfs.c 		obj = dev_priv->gt.uc.guc.log.vma->obj;
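The i915_debugfs.c entries show the walking discipline for obj->vma.lock: the spinlock protects the vma list, but the per-vma body may sleep, so the lock is dropped around it and retaken before advancing. In outline (this drop-and-retake is only sound under extra guarantees about list stability that the index fragments alone do not show, such as the vma staying pinned):

static void for_each_bound_vma(struct drm_i915_gem_object *obj,
			       void (*fn)(struct i915_vma *vma))
{
	struct i915_vma *vma;

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		spin_unlock(&obj->vma.lock);
		fn(vma);	/* may sleep; the lock is not held here */
		spin_lock(&obj->vma.lock);
	}
	spin_unlock(&obj->vma.lock);
}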
vma               378 drivers/gpu/drm/i915/i915_drv.h 		struct i915_vma *vma;
vma               419 drivers/gpu/drm/i915/i915_drv.h 		struct i915_vma *vma;
vma              1148 drivers/gpu/drm/i915/i915_drv.h 		struct i915_vma *vma;
vma              2486 drivers/gpu/drm/i915/i915_drv.h int remap_io_mapping(struct vm_area_struct *vma,
vma                87 drivers/gpu/drm/i915/i915_gem.c 	struct i915_vma *vma;
vma                93 drivers/gpu/drm/i915/i915_gem.c 	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
vma                94 drivers/gpu/drm/i915/i915_gem.c 		if (i915_vma_is_pinned(vma))
vma                95 drivers/gpu/drm/i915/i915_gem.c 			pinned += vma->node.size;
vma               108 drivers/gpu/drm/i915/i915_gem.c 	struct i915_vma *vma;
vma               114 drivers/gpu/drm/i915/i915_gem.c 	spin_lock(&obj->vma.lock);
vma               115 drivers/gpu/drm/i915/i915_gem.c 	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
vma               118 drivers/gpu/drm/i915/i915_gem.c 		list_move_tail(&vma->obj_link, &still_in_list);
vma               119 drivers/gpu/drm/i915/i915_gem.c 		spin_unlock(&obj->vma.lock);
vma               123 drivers/gpu/drm/i915/i915_gem.c 		    !i915_vma_is_active(vma))
vma               124 drivers/gpu/drm/i915/i915_gem.c 			ret = i915_vma_unbind(vma);
vma               126 drivers/gpu/drm/i915/i915_gem.c 		spin_lock(&obj->vma.lock);
vma               128 drivers/gpu/drm/i915/i915_gem.c 	list_splice(&still_in_list, &obj->vma.list);
vma               129 drivers/gpu/drm/i915/i915_gem.c 	spin_unlock(&obj->vma.lock);
vma               337 drivers/gpu/drm/i915/i915_gem.c 	struct i915_vma *vma;
vma               346 drivers/gpu/drm/i915/i915_gem.c 	vma = ERR_PTR(-ENODEV);
vma               348 drivers/gpu/drm/i915/i915_gem.c 		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
vma               352 drivers/gpu/drm/i915/i915_gem.c 	if (!IS_ERR(vma)) {
vma               353 drivers/gpu/drm/i915/i915_gem.c 		node.start = i915_ggtt_offset(vma);
vma               422 drivers/gpu/drm/i915/i915_gem.c 		i915_vma_unpin(vma);
vma               529 drivers/gpu/drm/i915/i915_gem.c 	struct i915_vma *vma;
vma               556 drivers/gpu/drm/i915/i915_gem.c 	vma = ERR_PTR(-ENODEV);
vma               558 drivers/gpu/drm/i915/i915_gem.c 		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
vma               562 drivers/gpu/drm/i915/i915_gem.c 	if (!IS_ERR(vma)) {
vma               563 drivers/gpu/drm/i915/i915_gem.c 		node.start = i915_ggtt_offset(vma);
vma               643 drivers/gpu/drm/i915/i915_gem.c 		i915_vma_unpin(vma);
vma               882 drivers/gpu/drm/i915/i915_gem.c 		if (!reg->vma)
vma               885 drivers/gpu/drm/i915/i915_gem.c 		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
vma               981 drivers/gpu/drm/i915/i915_gem.c 	struct i915_vma *vma;
vma              1021 drivers/gpu/drm/i915/i915_gem.c 	vma = i915_vma_instance(obj, vm, view);
vma              1022 drivers/gpu/drm/i915/i915_gem.c 	if (IS_ERR(vma))
vma              1023 drivers/gpu/drm/i915/i915_gem.c 		return vma;
vma              1025 drivers/gpu/drm/i915/i915_gem.c 	if (i915_vma_misplaced(vma, size, alignment, flags)) {
vma              1027 drivers/gpu/drm/i915/i915_gem.c 			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
vma              1031 drivers/gpu/drm/i915/i915_gem.c 			    vma->fence_size > dev_priv->ggtt.mappable_end / 2)
vma              1035 drivers/gpu/drm/i915/i915_gem.c 		WARN(i915_vma_is_pinned(vma),
vma              1039 drivers/gpu/drm/i915/i915_gem.c 		     i915_ggtt_offset(vma), alignment,
vma              1041 drivers/gpu/drm/i915/i915_gem.c 		     i915_vma_is_map_and_fenceable(vma));
vma              1042 drivers/gpu/drm/i915/i915_gem.c 		ret = i915_vma_unbind(vma);
vma              1047 drivers/gpu/drm/i915/i915_gem.c 	if (vma->fence && !i915_gem_object_is_tiled(obj)) {
vma              1048 drivers/gpu/drm/i915/i915_gem.c 		mutex_lock(&vma->vm->mutex);
vma              1049 drivers/gpu/drm/i915/i915_gem.c 		ret = i915_vma_revoke_fence(vma);
vma              1050 drivers/gpu/drm/i915/i915_gem.c 		mutex_unlock(&vma->vm->mutex);
vma              1055 drivers/gpu/drm/i915/i915_gem.c 	ret = i915_vma_pin(vma, size, alignment, flags);
vma              1059 drivers/gpu/drm/i915/i915_gem.c 	return vma;
vma                57 drivers/gpu/drm/i915/i915_gem_evict.c 	  struct i915_vma *vma,
vma                61 drivers/gpu/drm/i915/i915_gem_evict.c 	if (i915_vma_is_pinned(vma))
vma                64 drivers/gpu/drm/i915/i915_gem_evict.c 	list_add(&vma->evict_link, unwind);
vma                65 drivers/gpu/drm/i915/i915_gem_evict.c 	return drm_mm_scan_add_block(scan, &vma->node);
vma               101 drivers/gpu/drm/i915/i915_gem_evict.c 	struct i915_vma *vma, *next;
vma               142 drivers/gpu/drm/i915/i915_gem_evict.c 	list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
vma               158 drivers/gpu/drm/i915/i915_gem_evict.c 		if (i915_vma_is_active(vma)) {
vma               159 drivers/gpu/drm/i915/i915_gem_evict.c 			if (vma == active) {
vma               168 drivers/gpu/drm/i915/i915_gem_evict.c 					active = vma;
vma               170 drivers/gpu/drm/i915/i915_gem_evict.c 				list_move_tail(&vma->vm_link, &vm->bound_list);
vma               175 drivers/gpu/drm/i915/i915_gem_evict.c 		if (mark_free(&scan, vma, flags, &eviction_list))
vma               180 drivers/gpu/drm/i915/i915_gem_evict.c 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
vma               181 drivers/gpu/drm/i915/i915_gem_evict.c 		ret = drm_mm_scan_remove_block(&scan, &vma->node);
vma               226 drivers/gpu/drm/i915/i915_gem_evict.c 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
vma               227 drivers/gpu/drm/i915/i915_gem_evict.c 		if (drm_mm_scan_remove_block(&scan, &vma->node))
vma               228 drivers/gpu/drm/i915/i915_gem_evict.c 			__i915_vma_pin(vma);
vma               230 drivers/gpu/drm/i915/i915_gem_evict.c 			list_del(&vma->evict_link);
vma               235 drivers/gpu/drm/i915/i915_gem_evict.c 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
vma               236 drivers/gpu/drm/i915/i915_gem_evict.c 		__i915_vma_unpin(vma);
vma               238 drivers/gpu/drm/i915/i915_gem_evict.c 			ret = i915_vma_unbind(vma);
vma               242 drivers/gpu/drm/i915/i915_gem_evict.c 		vma = container_of(node, struct i915_vma, node);
vma               243 drivers/gpu/drm/i915/i915_gem_evict.c 		ret = i915_vma_unbind(vma);
vma               268 drivers/gpu/drm/i915/i915_gem_evict.c 	struct i915_vma *vma, *next;
vma               305 drivers/gpu/drm/i915/i915_gem_evict.c 		vma = container_of(node, typeof(*vma), node);
vma               325 drivers/gpu/drm/i915/i915_gem_evict.c 		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
vma               331 drivers/gpu/drm/i915/i915_gem_evict.c 		if (i915_vma_is_pinned(vma)) {
vma               333 drivers/gpu/drm/i915/i915_gem_evict.c 			if (vma->exec_flags &&
vma               334 drivers/gpu/drm/i915/i915_gem_evict.c 			    *vma->exec_flags & EXEC_OBJECT_PINNED)
vma               347 drivers/gpu/drm/i915/i915_gem_evict.c 		__i915_vma_pin(vma);
vma               348 drivers/gpu/drm/i915/i915_gem_evict.c 		list_add(&vma->evict_link, &eviction_list);
vma               351 drivers/gpu/drm/i915/i915_gem_evict.c 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
vma               352 drivers/gpu/drm/i915/i915_gem_evict.c 		__i915_vma_unpin(vma);
vma               354 drivers/gpu/drm/i915/i915_gem_evict.c 			ret = i915_vma_unbind(vma);
vma               375 drivers/gpu/drm/i915/i915_gem_evict.c 	struct i915_vma *vma, *next;
vma               394 drivers/gpu/drm/i915/i915_gem_evict.c 	list_for_each_entry(vma, &vm->bound_list, vm_link) {
vma               395 drivers/gpu/drm/i915/i915_gem_evict.c 		if (i915_vma_is_pinned(vma))
vma               398 drivers/gpu/drm/i915/i915_gem_evict.c 		__i915_vma_pin(vma);
vma               399 drivers/gpu/drm/i915/i915_gem_evict.c 		list_add(&vma->evict_link, &eviction_list);
vma               404 drivers/gpu/drm/i915/i915_gem_evict.c 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
vma               405 drivers/gpu/drm/i915/i915_gem_evict.c 		__i915_vma_unpin(vma);
vma               407 drivers/gpu/drm/i915/i915_gem_evict.c 			ret = i915_vma_unbind(vma);
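All three eviction paths in i915_gem_evict.c above share one idiom: candidate vmas are temporarily pinned with __i915_vma_pin() and collected onto a local eviction_list through their evict_link, which keeps them from disappearing while the scan continues; afterwards each is unpinned and, as long as no error has occurred, unbound. The unwind loop, reconstructed from the fragments:

static int evict_collected(struct list_head *eviction_list)
{
	struct i915_vma *vma, *next;
	int ret = 0;

	list_for_each_entry_safe(vma, next, eviction_list, evict_link) {
		/* Drop the temporary pin taken while building the list. */
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

	return ret;
}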
vma                63 drivers/gpu/drm/i915/i915_gem_fence_reg.c 				 struct i915_vma *vma)
vma                81 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	if (vma) {
vma                82 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		unsigned int stride = i915_gem_object_get_stride(vma->obj);
vma                84 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
vma                85 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE));
vma                86 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE));
vma                89 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32;
vma                90 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		val |= vma->node.start;
vma                92 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
vma               120 drivers/gpu/drm/i915/i915_gem_fence_reg.c 				 struct i915_vma *vma)
vma               125 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	if (vma) {
vma               126 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
vma               128 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		unsigned int stride = i915_gem_object_get_stride(vma->obj);
vma               130 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
vma               131 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		GEM_BUG_ON(vma->node.start & ~I915_FENCE_START_MASK);
vma               132 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		GEM_BUG_ON(!is_power_of_2(vma->fence_size));
vma               133 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
vma               141 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		val = vma->node.start;
vma               144 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		val |= I915_FENCE_SIZE_BITS(vma->fence_size);
vma               160 drivers/gpu/drm/i915/i915_gem_fence_reg.c 				 struct i915_vma *vma)
vma               165 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	if (vma) {
vma               166 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		unsigned int stride = i915_gem_object_get_stride(vma->obj);
vma               168 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
vma               169 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		GEM_BUG_ON(vma->node.start & ~I830_FENCE_START_MASK);
vma               170 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		GEM_BUG_ON(!is_power_of_2(vma->fence_size));
vma               172 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
vma               174 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		val = vma->node.start;
vma               175 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
vma               177 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		val |= I830_FENCE_SIZE_BITS(vma->fence_size);
vma               192 drivers/gpu/drm/i915/i915_gem_fence_reg.c 			struct i915_vma *vma)
vma               201 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		i830_write_fence_reg(fence, vma);
vma               203 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		i915_write_fence_reg(fence, vma);
vma               205 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		i965_write_fence_reg(fence, vma);
vma               216 drivers/gpu/drm/i915/i915_gem_fence_reg.c 			struct i915_vma *vma)
vma               222 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	if (vma) {
vma               223 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		if (!i915_vma_is_map_and_fenceable(vma))
vma               226 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		if (WARN(!i915_gem_object_get_stride(vma->obj) ||
vma               227 drivers/gpu/drm/i915/i915_gem_fence_reg.c 			 !i915_gem_object_get_tiling(vma->obj),
vma               229 drivers/gpu/drm/i915/i915_gem_fence_reg.c 			 i915_gem_object_get_stride(vma->obj),
vma               230 drivers/gpu/drm/i915/i915_gem_fence_reg.c 			 i915_gem_object_get_tiling(vma->obj)))
vma               233 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		ret = i915_active_wait(&vma->active);
vma               238 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	old = xchg(&fence->vma, NULL);
vma               242 drivers/gpu/drm/i915/i915_gem_fence_reg.c 			fence->vma = old;
vma               252 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		if (old != vma) {
vma               273 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		GEM_BUG_ON(vma);
vma               277 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	WRITE_ONCE(fence->vma, vma);
vma               278 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	fence_write(fence, vma);
vma               280 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	if (vma) {
vma               281 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		vma->fence = fence;
vma               300 drivers/gpu/drm/i915/i915_gem_fence_reg.c int i915_vma_revoke_fence(struct i915_vma *vma)
vma               302 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	struct i915_fence_reg *fence = vma->fence;
vma               304 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	lockdep_assert_held(&vma->vm->mutex);
vma               319 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
vma               334 drivers/gpu/drm/i915/i915_gem_fence_reg.c static int __i915_vma_pin_fence(struct i915_vma *vma)
vma               336 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
vma               338 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
vma               342 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	if (vma->fence) {
vma               343 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		fence = vma->fence;
vma               344 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		GEM_BUG_ON(fence->vma != vma);
vma               351 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		fence = fence_find(vma->vm->i915);
vma               365 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	GEM_BUG_ON(fence->vma != set);
vma               366 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	GEM_BUG_ON(vma->fence != (set ? fence : NULL));
vma               394 drivers/gpu/drm/i915/i915_gem_fence_reg.c int i915_vma_pin_fence(struct i915_vma *vma)
vma               402 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
vma               403 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
vma               404 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
vma               406 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	err = mutex_lock_interruptible(&vma->vm->mutex);
vma               410 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	err = __i915_vma_pin_fence(vma);
vma               411 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	mutex_unlock(&vma->vm->mutex);
vma               443 drivers/gpu/drm/i915/i915_gem_fence_reg.c 	if (fence->vma) {
vma               485 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		struct i915_vma *vma = READ_ONCE(reg->vma);
vma               487 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		GEM_BUG_ON(vma && vma->fence != reg);
vma               493 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		if (vma && !i915_gem_object_is_tiled(vma->obj))
vma               494 drivers/gpu/drm/i915/i915_gem_fence_reg.c 			vma = NULL;
vma               496 drivers/gpu/drm/i915/i915_gem_fence_reg.c 		fence_write(reg, vma);
vma                43 drivers/gpu/drm/i915/i915_gem_fence_reg.h 	struct i915_vma *vma;
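The i915_gem_fence_reg.c writers above all encode vma->node.start, vma->fence_size and the object's tiling into a single per-generation register value; fence_write() merely dispatches to the i830/i915/i965 variant. A hedged sketch of the i965-style encoding (the pitch field and the valid bit are per-generation details elided here; the tiling bit name is assumed rather than shown above):

        static u64 i965_fence_encode(const struct i915_vma *vma)
        {
                u64 val;

                /* Both ends of the fenced range must be fence-page aligned. */
                GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE));
                GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE));

                /* Upper 32 bits: last fenced page; lower 32: first page. */
                val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32;
                val |= vma->node.start;

                if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
                        val |= BIT(I965_FENCE_TILING_Y_SHIFT); /* assumed name */

                return val;
        }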
vma               119 drivers/gpu/drm/i915/i915_gem_gtt.c i915_get_ggtt_vma_pages(struct i915_vma *vma);
vma               145 drivers/gpu/drm/i915/i915_gem_gtt.c static int ppgtt_bind_vma(struct i915_vma *vma,
vma               152 drivers/gpu/drm/i915/i915_gem_gtt.c 	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
vma               153 drivers/gpu/drm/i915/i915_gem_gtt.c 		err = vma->vm->allocate_va_range(vma->vm,
vma               154 drivers/gpu/drm/i915/i915_gem_gtt.c 						 vma->node.start, vma->size);
vma               161 drivers/gpu/drm/i915/i915_gem_gtt.c 	if (i915_gem_object_is_readonly(vma->obj))
vma               164 drivers/gpu/drm/i915/i915_gem_gtt.c 	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
vma               169 drivers/gpu/drm/i915/i915_gem_gtt.c static void ppgtt_unbind_vma(struct i915_vma *vma)
vma               171 drivers/gpu/drm/i915/i915_gem_gtt.c 	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
vma               174 drivers/gpu/drm/i915/i915_gem_gtt.c static int ppgtt_set_pages(struct i915_vma *vma)
vma               176 drivers/gpu/drm/i915/i915_gem_gtt.c 	GEM_BUG_ON(vma->pages);
vma               178 drivers/gpu/drm/i915/i915_gem_gtt.c 	vma->pages = vma->obj->mm.pages;
vma               180 drivers/gpu/drm/i915/i915_gem_gtt.c 	vma->page_sizes = vma->obj->mm.page_sizes;
vma               185 drivers/gpu/drm/i915/i915_gem_gtt.c static void clear_pages(struct i915_vma *vma)
vma               187 drivers/gpu/drm/i915/i915_gem_gtt.c 	GEM_BUG_ON(!vma->pages);
vma               189 drivers/gpu/drm/i915/i915_gem_gtt.c 	if (vma->pages != vma->obj->mm.pages) {
vma               190 drivers/gpu/drm/i915/i915_gem_gtt.c 		sg_free_table(vma->pages);
vma               191 drivers/gpu/drm/i915/i915_gem_gtt.c 		kfree(vma->pages);
vma               193 drivers/gpu/drm/i915/i915_gem_gtt.c 	vma->pages = NULL;
vma               195 drivers/gpu/drm/i915/i915_gem_gtt.c 	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
vma               509 drivers/gpu/drm/i915/i915_gem_gtt.c 		struct i915_vma *vma, *vn;
vma               511 drivers/gpu/drm/i915/i915_gem_gtt.c 		list_for_each_entry_safe(vma, vn, *phase, vm_link)
vma               512 drivers/gpu/drm/i915/i915_gem_gtt.c 			i915_vma_destroy(vma);
vma              1160 drivers/gpu/drm/i915/i915_gem_gtt.c } sgt_dma(struct i915_vma *vma) {
vma              1161 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct scatterlist *sg = vma->pages->sgl;
vma              1214 drivers/gpu/drm/i915/i915_gem_gtt.c static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
vma              1220 drivers/gpu/drm/i915/i915_gem_gtt.c 	u64 start = vma->node.start;
vma              1223 drivers/gpu/drm/i915/i915_gem_gtt.c 	GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));
vma              1227 drivers/gpu/drm/i915/i915_gem_gtt.c 			gen8_pdp_for_page_address(vma->vm, start);
vma              1236 drivers/gpu/drm/i915/i915_gem_gtt.c 		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
vma              1253 drivers/gpu/drm/i915/i915_gem_gtt.c 			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
vma              1299 drivers/gpu/drm/i915/i915_gem_gtt.c 		     (i915_vm_has_scratch_64K(vma->vm) &&
vma              1300 drivers/gpu/drm/i915/i915_gem_gtt.c 		      !iter->sg && IS_ALIGNED(vma->node.start +
vma              1301 drivers/gpu/drm/i915/i915_gem_gtt.c 					      vma->node.size,
vma              1317 drivers/gpu/drm/i915/i915_gem_gtt.c 			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
vma              1320 drivers/gpu/drm/i915/i915_gem_gtt.c 				encode = vma->vm->scratch[0].encode;
vma              1330 drivers/gpu/drm/i915/i915_gem_gtt.c 		vma->page_sizes.gtt |= page_size;
vma              1335 drivers/gpu/drm/i915/i915_gem_gtt.c 			      struct i915_vma *vma,
vma              1340 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct sgt_dma iter = sgt_dma(vma);
vma              1342 drivers/gpu/drm/i915/i915_gem_gtt.c 	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
vma              1343 drivers/gpu/drm/i915/i915_gem_gtt.c 		gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
vma              1345 drivers/gpu/drm/i915/i915_gem_gtt.c 		u64 idx = vma->node.start >> GEN8_PTE_SHIFT;
vma              1355 drivers/gpu/drm/i915/i915_gem_gtt.c 		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
vma              1644 drivers/gpu/drm/i915/i915_gem_gtt.c 				      struct i915_vma *vma,
vma              1650 drivers/gpu/drm/i915/i915_gem_gtt.c 	unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
vma              1654 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct sgt_dma iter = sgt_dma(vma);
vma              1682 drivers/gpu/drm/i915/i915_gem_gtt.c 	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
vma              1719 drivers/gpu/drm/i915/i915_gem_gtt.c 				if (i915_vma_is_bound(ppgtt->vma,
vma              1795 drivers/gpu/drm/i915/i915_gem_gtt.c 	i915_vma_destroy(ppgtt->vma);
vma              1803 drivers/gpu/drm/i915/i915_gem_gtt.c static int pd_vma_set_pages(struct i915_vma *vma)
vma              1805 drivers/gpu/drm/i915/i915_gem_gtt.c 	vma->pages = ERR_PTR(-ENODEV);
vma              1809 drivers/gpu/drm/i915/i915_gem_gtt.c static void pd_vma_clear_pages(struct i915_vma *vma)
vma              1811 drivers/gpu/drm/i915/i915_gem_gtt.c 	GEM_BUG_ON(!vma->pages);
vma              1813 drivers/gpu/drm/i915/i915_gem_gtt.c 	vma->pages = NULL;
vma              1816 drivers/gpu/drm/i915/i915_gem_gtt.c static int pd_vma_bind(struct i915_vma *vma,
vma              1820 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
vma              1821 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct gen6_ppgtt *ppgtt = vma->private;
vma              1822 drivers/gpu/drm/i915/i915_gem_gtt.c 	u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
vma              1838 drivers/gpu/drm/i915/i915_gem_gtt.c static void pd_vma_unbind(struct i915_vma *vma)
vma              1840 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct gen6_ppgtt *ppgtt = vma->private;
vma              1873 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct i915_vma *vma;
vma              1878 drivers/gpu/drm/i915/i915_gem_gtt.c 	vma = i915_vma_alloc();
vma              1879 drivers/gpu/drm/i915/i915_gem_gtt.c 	if (!vma)
vma              1882 drivers/gpu/drm/i915/i915_gem_gtt.c 	i915_active_init(i915, &vma->active, NULL, NULL);
vma              1884 drivers/gpu/drm/i915/i915_gem_gtt.c 	vma->vm = &ggtt->vm;
vma              1885 drivers/gpu/drm/i915/i915_gem_gtt.c 	vma->ops = &pd_vma_ops;
vma              1886 drivers/gpu/drm/i915/i915_gem_gtt.c 	vma->private = ppgtt;
vma              1888 drivers/gpu/drm/i915/i915_gem_gtt.c 	vma->size = size;
vma              1889 drivers/gpu/drm/i915/i915_gem_gtt.c 	vma->fence_size = size;
vma              1890 drivers/gpu/drm/i915/i915_gem_gtt.c 	vma->flags = I915_VMA_GGTT;
vma              1891 drivers/gpu/drm/i915/i915_gem_gtt.c 	vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
vma              1893 drivers/gpu/drm/i915/i915_gem_gtt.c 	INIT_LIST_HEAD(&vma->obj_link);
vma              1894 drivers/gpu/drm/i915/i915_gem_gtt.c 	INIT_LIST_HEAD(&vma->closed_link);
vma              1896 drivers/gpu/drm/i915/i915_gem_gtt.c 	mutex_lock(&vma->vm->mutex);
vma              1897 drivers/gpu/drm/i915/i915_gem_gtt.c 	list_add(&vma->vm_link, &vma->vm->unbound_list);
vma              1898 drivers/gpu/drm/i915/i915_gem_gtt.c 	mutex_unlock(&vma->vm->mutex);
vma              1900 drivers/gpu/drm/i915/i915_gem_gtt.c 	return vma;
vma              1924 drivers/gpu/drm/i915/i915_gem_gtt.c 	err = i915_vma_pin(ppgtt->vma,
vma              1945 drivers/gpu/drm/i915/i915_gem_gtt.c 	i915_vma_unpin(ppgtt->vma);
vma              1956 drivers/gpu/drm/i915/i915_gem_gtt.c 	i915_vma_unpin(ppgtt->vma);
vma              1989 drivers/gpu/drm/i915/i915_gem_gtt.c 	ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
vma              1990 drivers/gpu/drm/i915/i915_gem_gtt.c 	if (IS_ERR(ppgtt->vma)) {
vma              1991 drivers/gpu/drm/i915/i915_gem_gtt.c 		err = PTR_ERR(ppgtt->vma);
vma              2190 drivers/gpu/drm/i915/i915_gem_gtt.c 				     struct i915_vma *vma,
vma              2206 drivers/gpu/drm/i915/i915_gem_gtt.c 	gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
vma              2207 drivers/gpu/drm/i915/i915_gem_gtt.c 	for_each_sgt_dma(addr, sgt_iter, vma->pages)
vma              2239 drivers/gpu/drm/i915/i915_gem_gtt.c 				     struct i915_vma *vma,
vma              2245 drivers/gpu/drm/i915/i915_gem_gtt.c 	unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
vma              2248 drivers/gpu/drm/i915/i915_gem_gtt.c 	for_each_sgt_dma(addr, iter, vma->pages)
vma              2328 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct i915_vma *vma;
vma              2337 drivers/gpu/drm/i915/i915_gem_gtt.c 	gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
vma              2344 drivers/gpu/drm/i915/i915_gem_gtt.c 					     struct i915_vma *vma,
vma              2348 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct insert_entries arg = { vm, vma, level, flags };
vma              2412 drivers/gpu/drm/i915/i915_gem_gtt.c 				     struct i915_vma *vma,
vma              2419 drivers/gpu/drm/i915/i915_gem_gtt.c 	intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
vma              2429 drivers/gpu/drm/i915/i915_gem_gtt.c static int ggtt_bind_vma(struct i915_vma *vma,
vma              2433 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct drm_i915_private *i915 = vma->vm->i915;
vma              2434 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct drm_i915_gem_object *obj = vma->obj;
vma              2444 drivers/gpu/drm/i915/i915_gem_gtt.c 		vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
vma              2446 drivers/gpu/drm/i915/i915_gem_gtt.c 	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
vma              2453 drivers/gpu/drm/i915/i915_gem_gtt.c 	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
vma              2458 drivers/gpu/drm/i915/i915_gem_gtt.c static void ggtt_unbind_vma(struct i915_vma *vma)
vma              2460 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct drm_i915_private *i915 = vma->vm->i915;
vma              2464 drivers/gpu/drm/i915/i915_gem_gtt.c 		vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
vma              2467 drivers/gpu/drm/i915/i915_gem_gtt.c static int aliasing_gtt_bind_vma(struct i915_vma *vma,
vma              2471 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct drm_i915_private *i915 = vma->vm->i915;
vma              2477 drivers/gpu/drm/i915/i915_gem_gtt.c 	if (i915_gem_object_is_readonly(vma->obj))
vma              2481 drivers/gpu/drm/i915/i915_gem_gtt.c 		struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
vma              2483 drivers/gpu/drm/i915/i915_gem_gtt.c 		if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
vma              2485 drivers/gpu/drm/i915/i915_gem_gtt.c 							  vma->node.start,
vma              2486 drivers/gpu/drm/i915/i915_gem_gtt.c 							  vma->size);
vma              2491 drivers/gpu/drm/i915/i915_gem_gtt.c 		alias->vm.insert_entries(&alias->vm, vma,
vma              2499 drivers/gpu/drm/i915/i915_gem_gtt.c 			vma->vm->insert_entries(vma->vm, vma,
vma              2507 drivers/gpu/drm/i915/i915_gem_gtt.c static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
vma              2509 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct drm_i915_private *i915 = vma->vm->i915;
vma              2511 drivers/gpu/drm/i915/i915_gem_gtt.c 	if (vma->flags & I915_VMA_GLOBAL_BIND) {
vma              2512 drivers/gpu/drm/i915/i915_gem_gtt.c 		struct i915_address_space *vm = vma->vm;
vma              2516 drivers/gpu/drm/i915/i915_gem_gtt.c 			vm->clear_range(vm, vma->node.start, vma->size);
vma              2519 drivers/gpu/drm/i915/i915_gem_gtt.c 	if (vma->flags & I915_VMA_LOCAL_BIND) {
vma              2521 drivers/gpu/drm/i915/i915_gem_gtt.c 			&i915_vm_to_ggtt(vma->vm)->alias->vm;
vma              2523 drivers/gpu/drm/i915/i915_gem_gtt.c 		vm->clear_range(vm, vma->node.start, vma->size);
vma              2545 drivers/gpu/drm/i915/i915_gem_gtt.c static int ggtt_set_pages(struct i915_vma *vma)
vma              2549 drivers/gpu/drm/i915/i915_gem_gtt.c 	GEM_BUG_ON(vma->pages);
vma              2551 drivers/gpu/drm/i915/i915_gem_gtt.c 	ret = i915_get_ggtt_vma_pages(vma);
vma              2555 drivers/gpu/drm/i915/i915_gem_gtt.c 	vma->page_sizes = vma->obj->mm.page_sizes;
vma              2752 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct i915_vma *vma, *vn;
vma              2761 drivers/gpu/drm/i915/i915_gem_gtt.c 	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
vma              2762 drivers/gpu/drm/i915/i915_gem_gtt.c 		WARN_ON(i915_vma_unbind(vma));
vma              3304 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct i915_vma *vma, *vn;
vma              3316 drivers/gpu/drm/i915/i915_gem_gtt.c 	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
vma              3317 drivers/gpu/drm/i915/i915_gem_gtt.c 		struct drm_i915_gem_object *obj = vma->obj;
vma              3319 drivers/gpu/drm/i915/i915_gem_gtt.c 		if (!(vma->flags & I915_VMA_GLOBAL_BIND))
vma              3324 drivers/gpu/drm/i915/i915_gem_gtt.c 		if (!i915_vma_unbind(vma))
vma              3327 drivers/gpu/drm/i915/i915_gem_gtt.c 		WARN_ON(i915_vma_bind(vma,
vma              3562 drivers/gpu/drm/i915/i915_gem_gtt.c i915_get_ggtt_vma_pages(struct i915_vma *vma)
vma              3571 drivers/gpu/drm/i915/i915_gem_gtt.c 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
vma              3573 drivers/gpu/drm/i915/i915_gem_gtt.c 	switch (vma->ggtt_view.type) {
vma              3575 drivers/gpu/drm/i915/i915_gem_gtt.c 		GEM_BUG_ON(vma->ggtt_view.type);
vma              3578 drivers/gpu/drm/i915/i915_gem_gtt.c 		vma->pages = vma->obj->mm.pages;
vma              3582 drivers/gpu/drm/i915/i915_gem_gtt.c 		vma->pages =
vma              3583 drivers/gpu/drm/i915/i915_gem_gtt.c 			intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
vma              3587 drivers/gpu/drm/i915/i915_gem_gtt.c 		vma->pages =
vma              3588 drivers/gpu/drm/i915/i915_gem_gtt.c 			intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
vma              3592 drivers/gpu/drm/i915/i915_gem_gtt.c 		vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
vma              3597 drivers/gpu/drm/i915/i915_gem_gtt.c 	if (IS_ERR(vma->pages)) {
vma              3598 drivers/gpu/drm/i915/i915_gem_gtt.c 		ret = PTR_ERR(vma->pages);
vma              3599 drivers/gpu/drm/i915/i915_gem_gtt.c 		vma->pages = NULL;
vma              3601 drivers/gpu/drm/i915/i915_gem_gtt.c 			  vma->ggtt_view.type, ret);
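Tying the i915_gem_gtt.c entries together: ppgtt_bind_vma() allocates the virtual-address range on the first local bind, refuses writable PTEs for read-only objects, and then writes the PTEs via vm->insert_entries; ppgtt_unbind_vma() is just a clear_range over the node. A sketch assembled from the fragments above:

        static int ppgtt_bind_vma(struct i915_vma *vma,
                                  enum i915_cache_level cache_level,
                                  u32 unused)
        {
                u32 pte_flags = 0;
                int err;

                if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
                        /* First bind: carve out the VA range for this node. */
                        err = vma->vm->allocate_va_range(vma->vm,
                                                         vma->node.start,
                                                         vma->size);
                        if (err)
                                return err;
                }

                /* Read-only objects must never receive a writable PTE. */
                if (i915_gem_object_is_readonly(vma->obj))
                        pte_flags |= PTE_READ_ONLY;

                vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
                return 0;
        }

        static void ppgtt_unbind_vma(struct i915_vma *vma)
        {
                vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
        }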
vma               270 drivers/gpu/drm/i915/i915_gem_gtt.h 	int (*bind_vma)(struct i915_vma *vma,
vma               277 drivers/gpu/drm/i915/i915_gem_gtt.h 	void (*unbind_vma)(struct i915_vma *vma);
vma               279 drivers/gpu/drm/i915/i915_gem_gtt.h 	int (*set_pages)(struct i915_vma *vma);
vma               280 drivers/gpu/drm/i915/i915_gem_gtt.h 	void (*clear_pages)(struct i915_vma *vma);
vma               354 drivers/gpu/drm/i915/i915_gem_gtt.h 			       struct i915_vma *vma,
vma               432 drivers/gpu/drm/i915/i915_gem_gtt.h 	struct i915_vma *vma;
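The i915_gem_gtt.h fragments above belong to the per-VMA ops table: each address space (and the special pd_vma earlier) supplies its own bind/unbind and page-population hooks. The vtable, as those entries imply:

        struct i915_vma_ops {
                /* Map the vma's pages into the page tables of its vm. */
                int (*bind_vma)(struct i915_vma *vma,
                                enum i915_cache_level cache_level,
                                u32 flags);
                /* Tear those mappings back down. */
                void (*unbind_vma)(struct i915_vma *vma);

                /* Populate/clear vma->pages (possibly a remapped view). */
                int (*set_pages)(struct i915_vma *vma);
                void (*clear_pages)(struct i915_vma *vma);
        };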
vma               958 drivers/gpu/drm/i915/i915_gpu_error.c 			 struct i915_vma *vma,
vma               971 drivers/gpu/drm/i915/i915_gpu_error.c 	if (!vma || !vma->pages)
vma               974 drivers/gpu/drm/i915/i915_gpu_error.c 	num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
vma               985 drivers/gpu/drm/i915/i915_gpu_error.c 	dst->gtt_offset = vma->node.start;
vma               986 drivers/gpu/drm/i915/i915_gpu_error.c 	dst->gtt_size = vma->node.size;
vma               992 drivers/gpu/drm/i915/i915_gpu_error.c 	for_each_sgt_dma(dma, iter, vma->pages) {
vma              1179 drivers/gpu/drm/i915/i915_gpu_error.c 	erq->start = i915_ggtt_offset(request->ring->vma);
vma              1281 drivers/gpu/drm/i915/i915_gpu_error.c 	    struct i915_vma *vma,
vma              1287 drivers/gpu/drm/i915/i915_gpu_error.c 	if (!vma)
vma              1294 drivers/gpu/drm/i915/i915_gpu_error.c 	if (!i915_active_trygrab(&vma->active)) {
vma              1300 drivers/gpu/drm/i915/i915_gpu_error.c 	*c->slot = i915_vma_get(vma);
vma              1332 drivers/gpu/drm/i915/i915_gpu_error.c 		capture = capture_vma(capture, c->vma, &bo[count]);
vma              1411 drivers/gpu/drm/i915/i915_gpu_error.c 				      request->ring->vma,
vma              1429 drivers/gpu/drm/i915/i915_gpu_error.c 			struct i915_vma *vma = *this->slot;
vma              1432 drivers/gpu/drm/i915/i915_gpu_error.c 				i915_error_object_create(i915, vma, compress);
vma              1434 drivers/gpu/drm/i915/i915_gpu_error.c 			i915_active_ungrab(&vma->active);
vma              1435 drivers/gpu/drm/i915/i915_gpu_error.c 			i915_vma_put(vma);
vma              1443 drivers/gpu/drm/i915/i915_gpu_error.c 						 engine->status_page.vma,
vma              1448 drivers/gpu/drm/i915/i915_gpu_error.c 						 engine->wa_ctx.vma,
vma              1488 drivers/gpu/drm/i915/i915_gpu_error.c 						     uc->guc.log.vma,
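The i915_gpu_error.c entries above show the capture discipline: a vma may only be snapshotted after its activity tracker is grabbed and an object reference is taken, and both are dropped once the error object is copied. A sketch of that bracket, compressed into one step for illustration (in the fragments above the grab and the copy happen in separate phases; `slot` is illustrative):

        if (vma && i915_active_trygrab(&vma->active)) {
                struct i915_vma *ref = i915_vma_get(vma); /* object ref */

                /* Safe to read ref->node and ref->pages while grabbed. */
                *slot = i915_error_object_create(i915, ref, compress);

                i915_active_ungrab(&ref->active);
                i915_vma_put(ref);
        }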
vma                59 drivers/gpu/drm/i915/i915_mm.c int remap_io_mapping(struct vm_area_struct *vma,
vma                67 drivers/gpu/drm/i915/i915_mm.c 	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
vma                70 drivers/gpu/drm/i915/i915_mm.c 	r.mm = vma->vm_mm;
vma                73 drivers/gpu/drm/i915/i915_mm.c 			  (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));
vma                77 drivers/gpu/drm/i915/i915_mm.c 		zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
vma               526 drivers/gpu/drm/i915/i915_perf.c 		struct i915_vma *vma = stream->oa_buffer.vma;
vma               527 drivers/gpu/drm/i915/i915_perf.c 		u32 gtt_offset = i915_ggtt_offset(vma);
vma               661 drivers/gpu/drm/i915/i915_perf.c 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
vma               949 drivers/gpu/drm/i915/i915_perf.c 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
vma              1354 drivers/gpu/drm/i915/i915_perf.c 	i915_vma_unpin_and_release(&stream->oa_buffer.vma,
vma              1396 drivers/gpu/drm/i915/i915_perf.c 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
vma              1446 drivers/gpu/drm/i915/i915_perf.c 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
vma              1507 drivers/gpu/drm/i915/i915_perf.c 	struct i915_vma *vma;
vma              1510 drivers/gpu/drm/i915/i915_perf.c 	if (WARN_ON(stream->oa_buffer.vma))
vma              1530 drivers/gpu/drm/i915/i915_perf.c 	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
vma              1531 drivers/gpu/drm/i915/i915_perf.c 	if (IS_ERR(vma)) {
vma              1532 drivers/gpu/drm/i915/i915_perf.c 		ret = PTR_ERR(vma);
vma              1535 drivers/gpu/drm/i915/i915_perf.c 	stream->oa_buffer.vma = vma;
vma              1545 drivers/gpu/drm/i915/i915_perf.c 			 i915_ggtt_offset(stream->oa_buffer.vma),
vma              1551 drivers/gpu/drm/i915/i915_perf.c 	__i915_vma_unpin(vma);
vma              1557 drivers/gpu/drm/i915/i915_perf.c 	stream->oa_buffer.vma = NULL;
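The i915_perf.c entries revolve around one buffer: the OA buffer object is pinned into the GGTT with 16 MiB alignment, the vma is cached in stream->oa_buffer.vma, and the hardware is programmed with its 32-bit GGTT offset. The core of that setup, per the fragments above (object allocation and error unwinding elided):

        struct i915_vma *vma;

        /* Pin bo into the global GTT, aligned for the OA unit. */
        vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        stream->oa_buffer.vma = vma;

        /* The OA registers take the 32-bit GGTT offset of the buffer. */
        gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);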
vma                50 drivers/gpu/drm/i915/i915_request.h 	struct i915_vma *vma;
vma               455 drivers/gpu/drm/i915/i915_trace.h 	    TP_PROTO(struct i915_vma *vma, unsigned flags),
vma               456 drivers/gpu/drm/i915/i915_trace.h 	    TP_ARGS(vma, flags),
vma               467 drivers/gpu/drm/i915/i915_trace.h 			   __entry->obj = vma->obj;
vma               468 drivers/gpu/drm/i915/i915_trace.h 			   __entry->vm = vma->vm;
vma               469 drivers/gpu/drm/i915/i915_trace.h 			   __entry->offset = vma->node.start;
vma               470 drivers/gpu/drm/i915/i915_trace.h 			   __entry->size = vma->node.size;
vma               481 drivers/gpu/drm/i915/i915_trace.h 	    TP_PROTO(struct i915_vma *vma),
vma               482 drivers/gpu/drm/i915/i915_trace.h 	    TP_ARGS(vma),
vma               492 drivers/gpu/drm/i915/i915_trace.h 			   __entry->obj = vma->obj;
vma               493 drivers/gpu/drm/i915/i915_trace.h 			   __entry->vm = vma->vm;
vma               494 drivers/gpu/drm/i915/i915_trace.h 			   __entry->offset = vma->node.start;
vma               495 drivers/gpu/drm/i915/i915_trace.h 			   __entry->size = vma->node.size;
vma                48 drivers/gpu/drm/i915/i915_vma.c void i915_vma_free(struct i915_vma *vma)
vma                50 drivers/gpu/drm/i915/i915_vma.c 	return kmem_cache_free(global.slab_vmas, vma);
vma                57 drivers/gpu/drm/i915/i915_vma.c static void vma_print_allocator(struct i915_vma *vma, const char *reason)
vma                63 drivers/gpu/drm/i915/i915_vma.c 	if (!vma->node.stack) {
vma                65 drivers/gpu/drm/i915/i915_vma.c 				 vma->node.start, vma->node.size, reason);
vma                69 drivers/gpu/drm/i915/i915_vma.c 	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
vma                72 drivers/gpu/drm/i915/i915_vma.c 			 vma->node.start, vma->node.size, reason, buf);
vma                77 drivers/gpu/drm/i915/i915_vma.c static void vma_print_allocator(struct i915_vma *vma, const char *reason)
vma               103 drivers/gpu/drm/i915/i915_vma.c 	struct i915_vma *vma;
vma               109 drivers/gpu/drm/i915/i915_vma.c 	vma = i915_vma_alloc();
vma               110 drivers/gpu/drm/i915/i915_vma.c 	if (vma == NULL)
vma               113 drivers/gpu/drm/i915/i915_vma.c 	vma->vm = vm;
vma               114 drivers/gpu/drm/i915/i915_vma.c 	vma->ops = &vm->vma_ops;
vma               115 drivers/gpu/drm/i915/i915_vma.c 	vma->obj = obj;
vma               116 drivers/gpu/drm/i915/i915_vma.c 	vma->resv = obj->base.resv;
vma               117 drivers/gpu/drm/i915/i915_vma.c 	vma->size = obj->base.size;
vma               118 drivers/gpu/drm/i915/i915_vma.c 	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
vma               120 drivers/gpu/drm/i915/i915_vma.c 	i915_active_init(vm->i915, &vma->active,
vma               126 drivers/gpu/drm/i915/i915_vma.c 		might_lock(&vma->active.mutex);
vma               130 drivers/gpu/drm/i915/i915_vma.c 	INIT_LIST_HEAD(&vma->closed_link);
vma               133 drivers/gpu/drm/i915/i915_vma.c 		vma->ggtt_view = *view;
vma               139 drivers/gpu/drm/i915/i915_vma.c 			vma->size = view->partial.size;
vma               140 drivers/gpu/drm/i915/i915_vma.c 			vma->size <<= PAGE_SHIFT;
vma               141 drivers/gpu/drm/i915/i915_vma.c 			GEM_BUG_ON(vma->size > obj->base.size);
vma               143 drivers/gpu/drm/i915/i915_vma.c 			vma->size = intel_rotation_info_size(&view->rotated);
vma               144 drivers/gpu/drm/i915/i915_vma.c 			vma->size <<= PAGE_SHIFT;
vma               146 drivers/gpu/drm/i915/i915_vma.c 			vma->size = intel_remapped_info_size(&view->remapped);
vma               147 drivers/gpu/drm/i915/i915_vma.c 			vma->size <<= PAGE_SHIFT;
vma               151 drivers/gpu/drm/i915/i915_vma.c 	if (unlikely(vma->size > vm->total))
vma               154 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
vma               157 drivers/gpu/drm/i915/i915_vma.c 		if (unlikely(overflows_type(vma->size, u32)))
vma               160 drivers/gpu/drm/i915/i915_vma.c 		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
vma               163 drivers/gpu/drm/i915/i915_vma.c 		if (unlikely(vma->fence_size < vma->size || /* overflow */
vma               164 drivers/gpu/drm/i915/i915_vma.c 			     vma->fence_size > vm->total))
vma               167 drivers/gpu/drm/i915/i915_vma.c 		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
vma               169 drivers/gpu/drm/i915/i915_vma.c 		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
vma               172 drivers/gpu/drm/i915/i915_vma.c 		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
vma               174 drivers/gpu/drm/i915/i915_vma.c 		vma->flags |= I915_VMA_GGTT;
vma               177 drivers/gpu/drm/i915/i915_vma.c 	spin_lock(&obj->vma.lock);
vma               180 drivers/gpu/drm/i915/i915_vma.c 	p = &obj->vma.tree.rb_node;
vma               195 drivers/gpu/drm/i915/i915_vma.c 			spin_unlock(&obj->vma.lock);
vma               196 drivers/gpu/drm/i915/i915_vma.c 			i915_vma_free(vma);
vma               205 drivers/gpu/drm/i915/i915_vma.c 	rb_link_node(&vma->obj_node, rb, p);
vma               206 drivers/gpu/drm/i915/i915_vma.c 	rb_insert_color(&vma->obj_node, &obj->vma.tree);
vma               208 drivers/gpu/drm/i915/i915_vma.c 	if (i915_vma_is_ggtt(vma))
vma               215 drivers/gpu/drm/i915/i915_vma.c 		list_add(&vma->obj_link, &obj->vma.list);
vma               217 drivers/gpu/drm/i915/i915_vma.c 		list_add_tail(&vma->obj_link, &obj->vma.list);
vma               219 drivers/gpu/drm/i915/i915_vma.c 	spin_unlock(&obj->vma.lock);
vma               222 drivers/gpu/drm/i915/i915_vma.c 	list_add(&vma->vm_link, &vm->unbound_list);
vma               225 drivers/gpu/drm/i915/i915_vma.c 	return vma;
vma               228 drivers/gpu/drm/i915/i915_vma.c 	i915_vma_free(vma);
vma               239 drivers/gpu/drm/i915/i915_vma.c 	rb = obj->vma.tree.rb_node;
vma               241 drivers/gpu/drm/i915/i915_vma.c 		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
vma               244 drivers/gpu/drm/i915/i915_vma.c 		cmp = i915_vma_compare(vma, vm, view);
vma               246 drivers/gpu/drm/i915/i915_vma.c 			return vma;
vma               277 drivers/gpu/drm/i915/i915_vma.c 	struct i915_vma *vma;
vma               282 drivers/gpu/drm/i915/i915_vma.c 	spin_lock(&obj->vma.lock);
vma               283 drivers/gpu/drm/i915/i915_vma.c 	vma = vma_lookup(obj, vm, view);
vma               284 drivers/gpu/drm/i915/i915_vma.c 	spin_unlock(&obj->vma.lock);
vma               287 drivers/gpu/drm/i915/i915_vma.c 	if (unlikely(!vma))
vma               288 drivers/gpu/drm/i915/i915_vma.c 		vma = vma_create(obj, vm, view);
vma               290 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
vma               291 drivers/gpu/drm/i915/i915_vma.c 	return vma;
vma               304 drivers/gpu/drm/i915/i915_vma.c int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
vma               311 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
vma               312 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(vma->size > vma->node.size);
vma               314 drivers/gpu/drm/i915/i915_vma.c 	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
vma               315 drivers/gpu/drm/i915/i915_vma.c 					      vma->node.size,
vma               316 drivers/gpu/drm/i915/i915_vma.c 					      vma->vm->total)))
vma               328 drivers/gpu/drm/i915/i915_vma.c 	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
vma               336 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(!vma->pages);
vma               338 drivers/gpu/drm/i915/i915_vma.c 	trace_i915_vma_bind(vma, bind_flags);
vma               339 drivers/gpu/drm/i915/i915_vma.c 	ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
vma               343 drivers/gpu/drm/i915/i915_vma.c 	vma->flags |= bind_flags;
vma               347 drivers/gpu/drm/i915/i915_vma.c void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
vma               353 drivers/gpu/drm/i915/i915_vma.c 	assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
vma               355 drivers/gpu/drm/i915/i915_vma.c 	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
vma               356 drivers/gpu/drm/i915/i915_vma.c 	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
vma               361 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
vma               362 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
vma               364 drivers/gpu/drm/i915/i915_vma.c 	ptr = vma->iomap;
vma               366 drivers/gpu/drm/i915/i915_vma.c 		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
vma               367 drivers/gpu/drm/i915/i915_vma.c 					vma->node.start,
vma               368 drivers/gpu/drm/i915/i915_vma.c 					vma->node.size);
vma               374 drivers/gpu/drm/i915/i915_vma.c 		vma->iomap = ptr;
vma               377 drivers/gpu/drm/i915/i915_vma.c 	__i915_vma_pin(vma);
vma               379 drivers/gpu/drm/i915/i915_vma.c 	err = i915_vma_pin_fence(vma);
vma               383 drivers/gpu/drm/i915/i915_vma.c 	i915_vma_set_ggtt_write(vma);
vma               387 drivers/gpu/drm/i915/i915_vma.c 	__i915_vma_unpin(vma);
vma               392 drivers/gpu/drm/i915/i915_vma.c void i915_vma_flush_writes(struct i915_vma *vma)
vma               394 drivers/gpu/drm/i915/i915_vma.c 	if (!i915_vma_has_ggtt_write(vma))
vma               397 drivers/gpu/drm/i915/i915_vma.c 	intel_gt_flush_ggtt_writes(vma->vm->gt);
vma               399 drivers/gpu/drm/i915/i915_vma.c 	i915_vma_unset_ggtt_write(vma);
vma               402 drivers/gpu/drm/i915/i915_vma.c void i915_vma_unpin_iomap(struct i915_vma *vma)
vma               404 drivers/gpu/drm/i915/i915_vma.c 	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
vma               406 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(vma->iomap == NULL);
vma               408 drivers/gpu/drm/i915/i915_vma.c 	i915_vma_flush_writes(vma);
vma               410 drivers/gpu/drm/i915/i915_vma.c 	i915_vma_unpin_fence(vma);
vma               411 drivers/gpu/drm/i915/i915_vma.c 	i915_vma_unpin(vma);
vma               416 drivers/gpu/drm/i915/i915_vma.c 	struct i915_vma *vma;
vma               419 drivers/gpu/drm/i915/i915_vma.c 	vma = fetch_and_zero(p_vma);
vma               420 drivers/gpu/drm/i915/i915_vma.c 	if (!vma)
vma               423 drivers/gpu/drm/i915/i915_vma.c 	obj = vma->obj;
vma               426 drivers/gpu/drm/i915/i915_vma.c 	i915_vma_unpin(vma);
vma               427 drivers/gpu/drm/i915/i915_vma.c 	i915_vma_close(vma);
vma               435 drivers/gpu/drm/i915/i915_vma.c bool i915_vma_misplaced(const struct i915_vma *vma,
vma               438 drivers/gpu/drm/i915/i915_vma.c 	if (!drm_mm_node_allocated(&vma->node))
vma               441 drivers/gpu/drm/i915/i915_vma.c 	if (vma->node.size < size)
vma               445 drivers/gpu/drm/i915/i915_vma.c 	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
vma               448 drivers/gpu/drm/i915/i915_vma.c 	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
vma               452 drivers/gpu/drm/i915/i915_vma.c 	    vma->node.start < (flags & PIN_OFFSET_MASK))
vma               456 drivers/gpu/drm/i915/i915_vma.c 	    vma->node.start != (flags & PIN_OFFSET_MASK))
vma               462 drivers/gpu/drm/i915/i915_vma.c void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
vma               466 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
vma               467 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(!vma->fence_size);
vma               469 drivers/gpu/drm/i915/i915_vma.c 	fenceable = (vma->node.size >= vma->fence_size &&
vma               470 drivers/gpu/drm/i915/i915_vma.c 		     IS_ALIGNED(vma->node.start, vma->fence_alignment));
vma               472 drivers/gpu/drm/i915/i915_vma.c 	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
vma               475 drivers/gpu/drm/i915/i915_vma.c 		vma->flags |= I915_VMA_CAN_FENCE;
vma               477 drivers/gpu/drm/i915/i915_vma.c 		vma->flags &= ~I915_VMA_CAN_FENCE;
vma               485 drivers/gpu/drm/i915/i915_vma.c bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
vma               487 drivers/gpu/drm/i915/i915_vma.c 	struct drm_mm_node *node = &vma->node;
vma               497 drivers/gpu/drm/i915/i915_vma.c 	if (vma->vm->mm.color_adjust == NULL)
vma               542 drivers/gpu/drm/i915/i915_vma.c i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
vma               544 drivers/gpu/drm/i915/i915_vma.c 	struct drm_i915_private *dev_priv = vma->vm->i915;
vma               549 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(i915_vma_is_closed(vma));
vma               550 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
vma               551 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
vma               553 drivers/gpu/drm/i915/i915_vma.c 	size = max(size, vma->size);
vma               554 drivers/gpu/drm/i915/i915_vma.c 	alignment = max(alignment, vma->display_alignment);
vma               556 drivers/gpu/drm/i915/i915_vma.c 		size = max_t(typeof(size), size, vma->fence_size);
vma               558 drivers/gpu/drm/i915/i915_vma.c 				  alignment, vma->fence_alignment);
vma               568 drivers/gpu/drm/i915/i915_vma.c 	end = vma->vm->total;
vma               586 drivers/gpu/drm/i915/i915_vma.c 	if (vma->obj) {
vma               587 drivers/gpu/drm/i915/i915_vma.c 		ret = i915_gem_object_pin_pages(vma->obj);
vma               591 drivers/gpu/drm/i915/i915_vma.c 		cache_level = vma->obj->cache_level;
vma               596 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(vma->pages);
vma               598 drivers/gpu/drm/i915/i915_vma.c 	ret = vma->ops->set_pages(vma);
vma               610 drivers/gpu/drm/i915/i915_vma.c 		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
vma               625 drivers/gpu/drm/i915/i915_vma.c 		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
vma               633 drivers/gpu/drm/i915/i915_vma.c 				rounddown_pow_of_two(vma->page_sizes.sg |
vma               641 drivers/gpu/drm/i915/i915_vma.c 			GEM_BUG_ON(i915_vma_is_ggtt(vma));
vma               645 drivers/gpu/drm/i915/i915_vma.c 			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
vma               649 drivers/gpu/drm/i915/i915_vma.c 		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
vma               655 drivers/gpu/drm/i915/i915_vma.c 		GEM_BUG_ON(vma->node.start < start);
vma               656 drivers/gpu/drm/i915/i915_vma.c 		GEM_BUG_ON(vma->node.start + vma->node.size > end);
vma               658 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
vma               659 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
vma               661 drivers/gpu/drm/i915/i915_vma.c 	mutex_lock(&vma->vm->mutex);
vma               662 drivers/gpu/drm/i915/i915_vma.c 	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
vma               663 drivers/gpu/drm/i915/i915_vma.c 	mutex_unlock(&vma->vm->mutex);
vma               665 drivers/gpu/drm/i915/i915_vma.c 	if (vma->obj) {
vma               666 drivers/gpu/drm/i915/i915_vma.c 		atomic_inc(&vma->obj->bind_count);
vma               667 drivers/gpu/drm/i915/i915_vma.c 		assert_bind_count(vma->obj);
vma               673 drivers/gpu/drm/i915/i915_vma.c 	vma->ops->clear_pages(vma);
vma               675 drivers/gpu/drm/i915/i915_vma.c 	if (vma->obj)
vma               676 drivers/gpu/drm/i915/i915_vma.c 		i915_gem_object_unpin_pages(vma->obj);
vma               681 drivers/gpu/drm/i915/i915_vma.c i915_vma_remove(struct i915_vma *vma)
vma               683 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
vma               684 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
vma               686 drivers/gpu/drm/i915/i915_vma.c 	vma->ops->clear_pages(vma);
vma               688 drivers/gpu/drm/i915/i915_vma.c 	mutex_lock(&vma->vm->mutex);
vma               689 drivers/gpu/drm/i915/i915_vma.c 	drm_mm_remove_node(&vma->node);
vma               690 drivers/gpu/drm/i915/i915_vma.c 	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
vma               691 drivers/gpu/drm/i915/i915_vma.c 	mutex_unlock(&vma->vm->mutex);
vma               697 drivers/gpu/drm/i915/i915_vma.c 	if (vma->obj) {
vma               698 drivers/gpu/drm/i915/i915_vma.c 		struct drm_i915_gem_object *obj = vma->obj;
vma               712 drivers/gpu/drm/i915/i915_vma.c int __i915_vma_do_pin(struct i915_vma *vma,
vma               715 drivers/gpu/drm/i915/i915_vma.c 	const unsigned int bound = vma->flags;
vma               718 drivers/gpu/drm/i915/i915_vma.c 	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
vma               720 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
vma               728 drivers/gpu/drm/i915/i915_vma.c 		ret = i915_vma_insert(vma, size, alignment, flags);
vma               732 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
vma               734 drivers/gpu/drm/i915/i915_vma.c 	ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
vma               738 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);
vma               740 drivers/gpu/drm/i915/i915_vma.c 	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
vma               741 drivers/gpu/drm/i915/i915_vma.c 		__i915_vma_set_map_and_fenceable(vma);
vma               743 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
vma               748 drivers/gpu/drm/i915/i915_vma.c 		i915_vma_remove(vma);
vma               749 drivers/gpu/drm/i915/i915_vma.c 		GEM_BUG_ON(vma->pages);
vma               750 drivers/gpu/drm/i915/i915_vma.c 		GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
vma               753 drivers/gpu/drm/i915/i915_vma.c 	__i915_vma_unpin(vma);
vma               757 drivers/gpu/drm/i915/i915_vma.c void i915_vma_close(struct i915_vma *vma)
vma               759 drivers/gpu/drm/i915/i915_vma.c 	struct drm_i915_private *i915 = vma->vm->i915;
vma               762 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(i915_vma_is_closed(vma));
vma               777 drivers/gpu/drm/i915/i915_vma.c 	list_add(&vma->closed_link, &i915->gt.closed_vma);
vma               781 drivers/gpu/drm/i915/i915_vma.c static void __i915_vma_remove_closed(struct i915_vma *vma)
vma               783 drivers/gpu/drm/i915/i915_vma.c 	struct drm_i915_private *i915 = vma->vm->i915;
vma               785 drivers/gpu/drm/i915/i915_vma.c 	if (!i915_vma_is_closed(vma))
vma               789 drivers/gpu/drm/i915/i915_vma.c 	list_del_init(&vma->closed_link);
vma               793 drivers/gpu/drm/i915/i915_vma.c void i915_vma_reopen(struct i915_vma *vma)
vma               795 drivers/gpu/drm/i915/i915_vma.c 	__i915_vma_remove_closed(vma);
vma               798 drivers/gpu/drm/i915/i915_vma.c static void __i915_vma_destroy(struct i915_vma *vma)
vma               800 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(vma->node.allocated);
vma               801 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(vma->fence);
vma               803 drivers/gpu/drm/i915/i915_vma.c 	mutex_lock(&vma->vm->mutex);
vma               804 drivers/gpu/drm/i915/i915_vma.c 	list_del(&vma->vm_link);
vma               805 drivers/gpu/drm/i915/i915_vma.c 	mutex_unlock(&vma->vm->mutex);
vma               807 drivers/gpu/drm/i915/i915_vma.c 	if (vma->obj) {
vma               808 drivers/gpu/drm/i915/i915_vma.c 		struct drm_i915_gem_object *obj = vma->obj;
vma               810 drivers/gpu/drm/i915/i915_vma.c 		spin_lock(&obj->vma.lock);
vma               811 drivers/gpu/drm/i915/i915_vma.c 		list_del(&vma->obj_link);
vma               812 drivers/gpu/drm/i915/i915_vma.c 		rb_erase(&vma->obj_node, &vma->obj->vma.tree);
vma               813 drivers/gpu/drm/i915/i915_vma.c 		spin_unlock(&obj->vma.lock);
vma               816 drivers/gpu/drm/i915/i915_vma.c 	i915_active_fini(&vma->active);
vma               818 drivers/gpu/drm/i915/i915_vma.c 	i915_vma_free(vma);
vma               821 drivers/gpu/drm/i915/i915_vma.c void i915_vma_destroy(struct i915_vma *vma)
vma               823 drivers/gpu/drm/i915/i915_vma.c 	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
vma               825 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(i915_vma_is_pinned(vma));
vma               827 drivers/gpu/drm/i915/i915_vma.c 	__i915_vma_remove_closed(vma);
vma               829 drivers/gpu/drm/i915/i915_vma.c 	WARN_ON(i915_vma_unbind(vma));
vma               830 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(i915_vma_is_active(vma));
vma               832 drivers/gpu/drm/i915/i915_vma.c 	__i915_vma_destroy(vma);
vma               837 drivers/gpu/drm/i915/i915_vma.c 	struct i915_vma *vma, *next;
vma               840 drivers/gpu/drm/i915/i915_vma.c 	list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
vma               841 drivers/gpu/drm/i915/i915_vma.c 		list_del_init(&vma->closed_link);
vma               844 drivers/gpu/drm/i915/i915_vma.c 		i915_vma_destroy(vma);
vma               851 drivers/gpu/drm/i915/i915_vma.c static void __i915_vma_iounmap(struct i915_vma *vma)
vma               853 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(i915_vma_is_pinned(vma));
vma               855 drivers/gpu/drm/i915/i915_vma.c 	if (vma->iomap == NULL)
vma               858 drivers/gpu/drm/i915/i915_vma.c 	io_mapping_unmap(vma->iomap);
vma               859 drivers/gpu/drm/i915/i915_vma.c 	vma->iomap = NULL;
vma               862 drivers/gpu/drm/i915/i915_vma.c void i915_vma_revoke_mmap(struct i915_vma *vma)
vma               864 drivers/gpu/drm/i915/i915_vma.c 	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
vma               867 drivers/gpu/drm/i915/i915_vma.c 	lockdep_assert_held(&vma->vm->mutex);
vma               869 drivers/gpu/drm/i915/i915_vma.c 	if (!i915_vma_has_userfault(vma))
vma               872 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
vma               873 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(!vma->obj->userfault_count);
vma               875 drivers/gpu/drm/i915/i915_vma.c 	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
vma               876 drivers/gpu/drm/i915/i915_vma.c 	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
vma               878 drivers/gpu/drm/i915/i915_vma.c 			    vma->size,
vma               881 drivers/gpu/drm/i915/i915_vma.c 	i915_vma_unset_userfault(vma);
vma               882 drivers/gpu/drm/i915/i915_vma.c 	if (!--vma->obj->userfault_count)
vma               883 drivers/gpu/drm/i915/i915_vma.c 		list_del(&vma->obj->userfault_link);
vma               886 drivers/gpu/drm/i915/i915_vma.c int i915_vma_move_to_active(struct i915_vma *vma,
vma               890 drivers/gpu/drm/i915/i915_vma.c 	struct drm_i915_gem_object *obj = vma->obj;
vma               893 drivers/gpu/drm/i915/i915_vma.c 	assert_vma_held(vma);
vma               895 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
vma               905 drivers/gpu/drm/i915/i915_vma.c 	err = i915_active_ref(&vma->active, rq->timeline, rq);
vma               915 drivers/gpu/drm/i915/i915_vma.c 		dma_resv_add_excl_fence(vma->resv, &rq->fence);
vma               919 drivers/gpu/drm/i915/i915_vma.c 		err = dma_resv_reserve_shared(vma->resv, 1);
vma               923 drivers/gpu/drm/i915/i915_vma.c 		dma_resv_add_shared_fence(vma->resv, &rq->fence);
vma               929 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(!i915_vma_is_active(vma));
vma               933 drivers/gpu/drm/i915/i915_vma.c int i915_vma_unbind(struct i915_vma *vma)
vma               937 drivers/gpu/drm/i915/i915_vma.c 	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
vma               944 drivers/gpu/drm/i915/i915_vma.c 	if (i915_vma_is_active(vma)) {
vma               958 drivers/gpu/drm/i915/i915_vma.c 		__i915_vma_pin(vma);
vma               959 drivers/gpu/drm/i915/i915_vma.c 		ret = i915_active_wait(&vma->active);
vma               960 drivers/gpu/drm/i915/i915_vma.c 		__i915_vma_unpin(vma);
vma               964 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(i915_vma_is_active(vma));
vma               966 drivers/gpu/drm/i915/i915_vma.c 	if (i915_vma_is_pinned(vma)) {
vma               967 drivers/gpu/drm/i915/i915_vma.c 		vma_print_allocator(vma, "is pinned");
vma               971 drivers/gpu/drm/i915/i915_vma.c 	if (!drm_mm_node_allocated(&vma->node))
vma               974 drivers/gpu/drm/i915/i915_vma.c 	if (i915_vma_is_map_and_fenceable(vma)) {
vma               981 drivers/gpu/drm/i915/i915_vma.c 		i915_vma_flush_writes(vma);
vma               982 drivers/gpu/drm/i915/i915_vma.c 		GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
vma               985 drivers/gpu/drm/i915/i915_vma.c 		mutex_lock(&vma->vm->mutex);
vma               986 drivers/gpu/drm/i915/i915_vma.c 		ret = i915_vma_revoke_fence(vma);
vma               987 drivers/gpu/drm/i915/i915_vma.c 		mutex_unlock(&vma->vm->mutex);
vma               992 drivers/gpu/drm/i915/i915_vma.c 		mutex_lock(&vma->vm->mutex);
vma               993 drivers/gpu/drm/i915/i915_vma.c 		i915_vma_revoke_mmap(vma);
vma               994 drivers/gpu/drm/i915/i915_vma.c 		mutex_unlock(&vma->vm->mutex);
vma               996 drivers/gpu/drm/i915/i915_vma.c 		__i915_vma_iounmap(vma);
vma               997 drivers/gpu/drm/i915/i915_vma.c 		vma->flags &= ~I915_VMA_CAN_FENCE;
vma               999 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(vma->fence);
vma              1000 drivers/gpu/drm/i915/i915_vma.c 	GEM_BUG_ON(i915_vma_has_userfault(vma));
vma              1002 drivers/gpu/drm/i915/i915_vma.c 	if (likely(!vma->vm->closed)) {
vma              1003 drivers/gpu/drm/i915/i915_vma.c 		trace_i915_vma_unbind(vma);
vma              1004 drivers/gpu/drm/i915/i915_vma.c 		vma->ops->unbind_vma(vma);
vma              1006 drivers/gpu/drm/i915/i915_vma.c 	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
vma              1008 drivers/gpu/drm/i915/i915_vma.c 	i915_vma_remove(vma);
vma              1013 drivers/gpu/drm/i915/i915_vma.c struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
vma              1015 drivers/gpu/drm/i915/i915_vma.c 	i915_gem_object_make_unshrinkable(vma->obj);
vma              1016 drivers/gpu/drm/i915/i915_vma.c 	return vma;
vma              1019 drivers/gpu/drm/i915/i915_vma.c void i915_vma_make_shrinkable(struct i915_vma *vma)
vma              1021 drivers/gpu/drm/i915/i915_vma.c 	i915_gem_object_make_shrinkable(vma->obj);
vma              1024 drivers/gpu/drm/i915/i915_vma.c void i915_vma_make_purgeable(struct i915_vma *vma)
vma              1026 drivers/gpu/drm/i915/i915_vma.c 	i915_gem_object_make_purgeable(vma->obj);
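Taken together, the i915_vma.c entries describe the GGTT mapping lifecycle: pin the vma (insert + bind on first use), map it through the aperture with i915_vma_pin_iomap(), and release with the matching unpin calls, which also flush GGTT writes and drop any fence. A usage sketch built only from the functions above (assumes struct_mutex and an rpm wakeref are held, as the asserts above require):

        void __iomem *ptr;
        int err;

        err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
        if (err)
                return err;

        ptr = i915_vma_pin_iomap(vma);  /* WC mapping via the aperture */
        if (IS_ERR(ptr)) {
                i915_vma_unpin(vma);
                return PTR_ERR(ptr);
        }

        /* ... write through ptr ... */

        i915_vma_unpin_iomap(vma);      /* flush GGTT writes, unpin fence */
        i915_vma_unpin(vma);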
vma               156 drivers/gpu/drm/i915/i915_vma.h static inline bool i915_vma_is_active(const struct i915_vma *vma)
vma               158 drivers/gpu/drm/i915/i915_vma.h 	return !i915_active_is_idle(&vma->active);
vma               161 drivers/gpu/drm/i915/i915_vma.h int __must_check i915_vma_move_to_active(struct i915_vma *vma,
vma               165 drivers/gpu/drm/i915/i915_vma.h static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
vma               167 drivers/gpu/drm/i915/i915_vma.h 	return vma->flags & I915_VMA_GGTT;
vma               170 drivers/gpu/drm/i915/i915_vma.h static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
vma               172 drivers/gpu/drm/i915/i915_vma.h 	return vma->flags & I915_VMA_GGTT_WRITE;
vma               175 drivers/gpu/drm/i915/i915_vma.h static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)
vma               177 drivers/gpu/drm/i915/i915_vma.h 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
vma               178 drivers/gpu/drm/i915/i915_vma.h 	vma->flags |= I915_VMA_GGTT_WRITE;
vma               181 drivers/gpu/drm/i915/i915_vma.h static inline void i915_vma_unset_ggtt_write(struct i915_vma *vma)
vma               183 drivers/gpu/drm/i915/i915_vma.h 	vma->flags &= ~I915_VMA_GGTT_WRITE;
vma               186 drivers/gpu/drm/i915/i915_vma.h void i915_vma_flush_writes(struct i915_vma *vma);
vma               188 drivers/gpu/drm/i915/i915_vma.h static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
vma               190 drivers/gpu/drm/i915/i915_vma.h 	return vma->flags & I915_VMA_CAN_FENCE;
vma               193 drivers/gpu/drm/i915/i915_vma.h static inline bool i915_vma_set_userfault(struct i915_vma *vma)
vma               195 drivers/gpu/drm/i915/i915_vma.h 	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
vma               196 drivers/gpu/drm/i915/i915_vma.h 	return __test_and_set_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
vma               199 drivers/gpu/drm/i915/i915_vma.h static inline void i915_vma_unset_userfault(struct i915_vma *vma)
vma               201 drivers/gpu/drm/i915/i915_vma.h 	return __clear_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
vma               204 drivers/gpu/drm/i915/i915_vma.h static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
vma               206 drivers/gpu/drm/i915/i915_vma.h 	return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
vma               209 drivers/gpu/drm/i915/i915_vma.h static inline bool i915_vma_is_closed(const struct i915_vma *vma)
vma               211 drivers/gpu/drm/i915/i915_vma.h 	return !list_empty(&vma->closed_link);
vma               214 drivers/gpu/drm/i915/i915_vma.h static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
vma               216 drivers/gpu/drm/i915/i915_vma.h 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
vma               217 drivers/gpu/drm/i915/i915_vma.h 	GEM_BUG_ON(!vma->node.allocated);
vma               218 drivers/gpu/drm/i915/i915_vma.h 	GEM_BUG_ON(upper_32_bits(vma->node.start));
vma               219 drivers/gpu/drm/i915/i915_vma.h 	GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
vma               220 drivers/gpu/drm/i915/i915_vma.h 	return lower_32_bits(vma->node.start);
vma               223 drivers/gpu/drm/i915/i915_vma.h static inline u32 i915_ggtt_pin_bias(struct i915_vma *vma)
vma               225 drivers/gpu/drm/i915/i915_vma.h 	return i915_vm_to_ggtt(vma->vm)->pin_bias;
vma               228 drivers/gpu/drm/i915/i915_vma.h static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
vma               230 drivers/gpu/drm/i915/i915_vma.h 	i915_gem_object_get(vma->obj);
vma               231 drivers/gpu/drm/i915/i915_vma.h 	return vma;
vma               234 drivers/gpu/drm/i915/i915_vma.h static inline struct i915_vma *i915_vma_tryget(struct i915_vma *vma)
vma               236 drivers/gpu/drm/i915/i915_vma.h 	if (likely(kref_get_unless_zero(&vma->obj->base.refcount)))
vma               237 drivers/gpu/drm/i915/i915_vma.h 		return vma;
vma               242 drivers/gpu/drm/i915/i915_vma.h static inline void i915_vma_put(struct i915_vma *vma)
vma               244 drivers/gpu/drm/i915/i915_vma.h 	i915_gem_object_put(vma->obj);
vma               253 drivers/gpu/drm/i915/i915_vma.h i915_vma_compare(struct i915_vma *vma,
vma               261 drivers/gpu/drm/i915/i915_vma.h 	cmp = ptrdiff(vma->vm, vm);
vma               266 drivers/gpu/drm/i915/i915_vma.h 	cmp = vma->ggtt_view.type;
vma               293 drivers/gpu/drm/i915/i915_vma.h 	return memcmp(&vma->ggtt_view.partial, &view->partial, view->type);
vma               296 drivers/gpu/drm/i915/i915_vma.h int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
vma               298 drivers/gpu/drm/i915/i915_vma.h bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
vma               299 drivers/gpu/drm/i915/i915_vma.h bool i915_vma_misplaced(const struct i915_vma *vma,
vma               301 drivers/gpu/drm/i915/i915_vma.h void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
vma               302 drivers/gpu/drm/i915/i915_vma.h void i915_vma_revoke_mmap(struct i915_vma *vma);
vma               303 drivers/gpu/drm/i915/i915_vma.h int __must_check i915_vma_unbind(struct i915_vma *vma);
vma               304 drivers/gpu/drm/i915/i915_vma.h void i915_vma_unlink_ctx(struct i915_vma *vma);
vma               305 drivers/gpu/drm/i915/i915_vma.h void i915_vma_close(struct i915_vma *vma);
vma               306 drivers/gpu/drm/i915/i915_vma.h void i915_vma_reopen(struct i915_vma *vma);
vma               307 drivers/gpu/drm/i915/i915_vma.h void i915_vma_destroy(struct i915_vma *vma);
vma               309 drivers/gpu/drm/i915/i915_vma.h #define assert_vma_held(vma) dma_resv_assert_held((vma)->resv)
vma               311 drivers/gpu/drm/i915/i915_vma.h static inline void i915_vma_lock(struct i915_vma *vma)
vma               313 drivers/gpu/drm/i915/i915_vma.h 	dma_resv_lock(vma->resv, NULL);
vma               316 drivers/gpu/drm/i915/i915_vma.h static inline void i915_vma_unlock(struct i915_vma *vma)
vma               318 drivers/gpu/drm/i915/i915_vma.h 	dma_resv_unlock(vma->resv);
vma               321 drivers/gpu/drm/i915/i915_vma.h int __i915_vma_do_pin(struct i915_vma *vma,
vma               324 drivers/gpu/drm/i915/i915_vma.h i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
vma               333 drivers/gpu/drm/i915/i915_vma.h 	if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0)) {
vma               334 drivers/gpu/drm/i915/i915_vma.h 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
vma               335 drivers/gpu/drm/i915/i915_vma.h 		GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
vma               339 drivers/gpu/drm/i915/i915_vma.h 	return __i915_vma_do_pin(vma, size, alignment, flags);
vma               342 drivers/gpu/drm/i915/i915_vma.h static inline int i915_vma_pin_count(const struct i915_vma *vma)
vma               344 drivers/gpu/drm/i915/i915_vma.h 	return vma->flags & I915_VMA_PIN_MASK;
vma               347 drivers/gpu/drm/i915/i915_vma.h static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
vma               349 drivers/gpu/drm/i915/i915_vma.h 	return i915_vma_pin_count(vma);
vma               352 drivers/gpu/drm/i915/i915_vma.h static inline void __i915_vma_pin(struct i915_vma *vma)
vma               354 drivers/gpu/drm/i915/i915_vma.h 	vma->flags++;
vma               355 drivers/gpu/drm/i915/i915_vma.h 	GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
vma               358 drivers/gpu/drm/i915/i915_vma.h static inline void __i915_vma_unpin(struct i915_vma *vma)
vma               360 drivers/gpu/drm/i915/i915_vma.h 	vma->flags--;
vma               363 drivers/gpu/drm/i915/i915_vma.h static inline void i915_vma_unpin(struct i915_vma *vma)
vma               365 drivers/gpu/drm/i915/i915_vma.h 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
vma               366 drivers/gpu/drm/i915/i915_vma.h 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
vma               367 drivers/gpu/drm/i915/i915_vma.h 	__i915_vma_unpin(vma);
vma               370 drivers/gpu/drm/i915/i915_vma.h static inline bool i915_vma_is_bound(const struct i915_vma *vma,
vma               373 drivers/gpu/drm/i915/i915_vma.h 	return vma->flags & where;
vma               389 drivers/gpu/drm/i915/i915_vma.h void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
vma               401 drivers/gpu/drm/i915/i915_vma.h void i915_vma_unpin_iomap(struct i915_vma *vma);
vma               403 drivers/gpu/drm/i915/i915_vma.h static inline struct page *i915_vma_first_page(struct i915_vma *vma)
vma               405 drivers/gpu/drm/i915/i915_vma.h 	GEM_BUG_ON(!vma->pages);
vma               406 drivers/gpu/drm/i915/i915_vma.h 	return sg_page(vma->pages->sgl);
vma               424 drivers/gpu/drm/i915/i915_vma.h int __must_check i915_vma_pin_fence(struct i915_vma *vma);
vma               425 drivers/gpu/drm/i915/i915_vma.h int __must_check i915_vma_revoke_fence(struct i915_vma *vma);
vma               427 drivers/gpu/drm/i915/i915_vma.h static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
vma               429 drivers/gpu/drm/i915/i915_vma.h 	GEM_BUG_ON(atomic_read(&vma->fence->pin_count) <= 0);
vma               430 drivers/gpu/drm/i915/i915_vma.h 	atomic_dec(&vma->fence->pin_count);
vma               442 drivers/gpu/drm/i915/i915_vma.h i915_vma_unpin_fence(struct i915_vma *vma)
vma               445 drivers/gpu/drm/i915/i915_vma.h 	if (vma->fence)
vma               446 drivers/gpu/drm/i915/i915_vma.h 		__i915_vma_unpin_fence(vma);
vma               463 drivers/gpu/drm/i915/i915_vma.h 	list_for_each_entry(V, &(OBJ)->vma.list, obj_link)		\
vma               467 drivers/gpu/drm/i915/i915_vma.h void i915_vma_free(struct i915_vma *vma);
vma               469 drivers/gpu/drm/i915/i915_vma.h struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma);
vma               470 drivers/gpu/drm/i915/i915_vma.h void i915_vma_make_shrinkable(struct i915_vma *vma);
vma               471 drivers/gpu/drm/i915/i915_vma.h void i915_vma_make_purgeable(struct i915_vma *vma);
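The i915_vma.h block above declares the whole pin/unpin surface, with the pin count held in the low bits of vma->flags (see i915_vma_pin_count() and I915_VMA_PIN_MASK). A minimal sketch, not taken from the index, of the lifecycle those declarations imply; the object, address space and the PIN_USER flag are caller-supplied assumptions chosen for illustration:

	static int example_vma_lifecycle(struct drm_i915_gem_object *obj,
					 struct i915_address_space *vm)
	{
		struct i915_vma *vma;
		int err;

		vma = i915_vma_instance(obj, vm, NULL);	/* look up or create */
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		err = i915_vma_pin(vma, 0, 0, PIN_USER);	/* bind and pin */
		if (err)
			return err;

		/* vma->node is now allocated and stable for use */

		i915_vma_unpin(vma);		/* drop the pin count */
		return i915_vma_unbind(vma);	/* evict the binding */
	}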
vma                54 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 		struct i915_vma *vma;
vma                60 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
vma                61 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 		if (IS_ERR(vma)) {
vma                63 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 			if (vma == ERR_PTR(-ENOSPC))
vma                66 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 			return PTR_ERR(vma);
vma               110 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 	struct i915_vma *vma;
vma               113 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 	list_for_each_entry(vma, &i915->ggtt.vm.bound_list, vm_link)
vma               114 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 		if (vma->obj->mm.quirked)
vma               115 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 			i915_vma_unpin(vma);
vma               183 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 	struct i915_vma *vma;
vma               203 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
vma               204 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 	if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) {
vma               205 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 		pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR(vma));
vma               273 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 	struct i915_vma *vma;
vma               292 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
vma               294 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 	if (IS_ERR(vma)) {
vma               296 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 		err = PTR_ERR(vma);
vma               309 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
vma               311 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 	if (IS_ERR(vma)) {
vma               313 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 		err = PTR_ERR(vma);
vma               317 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 	i915_vma_unpin(vma);
vma               328 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		struct i915_vma *vma;
vma               330 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		vma = i915_vma_instance(obj, vm, NULL);
vma               331 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (!IS_ERR(vma))
vma               332 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			ignored = i915_vma_unbind(vma);
vma               334 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
vma               335 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			i915_vma_close(vma);
vma               353 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	struct i915_vma *vma;
vma               391 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					vma = i915_vma_instance(obj, vm, NULL);
vma               392 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					if (IS_ERR(vma))
vma               401 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					err = i915_vma_pin(vma, 0, 0, offset | flags);
vma               408 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					if (!drm_mm_node_allocated(&vma->node) ||
vma               409 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
vma               411 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
vma               417 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					i915_vma_unpin(vma);
vma               428 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					vma = i915_vma_instance(obj, vm, NULL);
vma               429 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					if (IS_ERR(vma))
vma               438 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					if (!drm_mm_node_allocated(&vma->node) ||
vma               439 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
vma               441 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 						       __func__, p->name, vma->node.start, vma->node.size,
vma               447 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					err = i915_vma_unbind(vma);
vma               450 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 						       __func__, p->name, vma->node.start, vma->node.size,
vma               464 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					vma = i915_vma_instance(obj, vm, NULL);
vma               465 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					if (IS_ERR(vma))
vma               474 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					err = i915_vma_pin(vma, 0, 0, offset | flags);
vma               481 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					if (!drm_mm_node_allocated(&vma->node) ||
vma               482 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
vma               484 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
vma               490 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					i915_vma_unpin(vma);
vma               501 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					vma = i915_vma_instance(obj, vm, NULL);
vma               502 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					if (IS_ERR(vma))
vma               511 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					if (!drm_mm_node_allocated(&vma->node) ||
vma               512 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
vma               514 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
vma               520 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					err = i915_vma_unbind(vma);
vma               523 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 						       __func__, p->name, vma->node.start, vma->node.size,
vma               573 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		struct i915_vma *vma;
vma               581 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		vma = i915_vma_instance(obj, vm, NULL);
vma               582 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (IS_ERR(vma)) {
vma               583 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			err = PTR_ERR(vma);
vma               590 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			err = i915_vma_pin(vma, 0, 0, addr | flags);
vma               593 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 				       __func__, addr, vma->size,
vma               597 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			i915_vma_unpin(vma);
vma               599 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			if (!drm_mm_node_allocated(&vma->node) ||
vma               600 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
vma               602 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 				       __func__, addr, vma->size);
vma               607 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			err = i915_vma_unbind(vma);
vma               610 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 				       __func__, addr, vma->size, err);
vma               614 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
vma               625 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (!i915_vma_is_ggtt(vma))
vma               626 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			i915_vma_close(vma);
vma               644 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	struct i915_vma *vma;
vma               657 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	vma = i915_vma_instance(obj, vm, NULL);
vma               658 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	if (IS_ERR(vma)) {
vma               659 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		err = PTR_ERR(vma);
vma               673 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			err = i915_vma_pin(vma, 0, 0, addr | flags);
vma               683 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			if (!drm_mm_node_allocated(&vma->node) ||
vma               684 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
vma               686 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 				       __func__, addr, vma->size);
vma               687 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 				i915_vma_unpin(vma);
vma               688 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 				err = i915_vma_unbind(vma);
vma               693 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			i915_vma_unpin(vma);
vma               694 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			err = i915_vma_unbind(vma);
vma               707 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	if (!i915_vma_is_ggtt(vma))
vma               708 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		i915_vma_close(vma);
vma               731 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		struct i915_vma *vma;
vma               766 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		vma = i915_vma_instance(obj, vm, NULL);
vma               767 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (IS_ERR(vma)) {
vma               768 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			err = PTR_ERR(vma);
vma               772 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		GEM_BUG_ON(vma->size != BIT_ULL(size));
vma               777 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			err = i915_vma_pin(vma, 0, 0, addr | flags);
vma               787 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			if (!drm_mm_node_allocated(&vma->node) ||
vma               788 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
vma               791 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 				i915_vma_unpin(vma);
vma               792 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 				err = i915_vma_unbind(vma);
vma               797 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			i915_vma_unpin(vma);
vma               798 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			err = i915_vma_unbind(vma);
vma               810 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (!i915_vma_is_ggtt(vma))
vma               811 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			i915_vma_close(vma);
vma               838 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		struct i915_vma *vma;
vma               850 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		vma = i915_vma_instance(obj, vm, NULL);
vma               851 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (IS_ERR(vma)) {
vma               852 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			err = PTR_ERR(vma);
vma               856 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		GEM_BUG_ON(vma->size != size);
vma               858 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		err = i915_vma_pin(vma, 0, 0, addr | flags);
vma               865 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (!drm_mm_node_allocated(&vma->node) ||
vma               866 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
vma               869 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			i915_vma_unpin(vma);
vma               870 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			err = i915_vma_unbind(vma);
vma               875 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		i915_vma_unpin(vma);
vma               935 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		struct i915_vma *vma;
vma               941 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		vma = i915_vma_instance(purge, vm, NULL);
vma               942 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (IS_ERR(vma)) {
vma               943 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			err = PTR_ERR(vma);
vma               947 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		err = i915_vma_pin(vma, 0, 0, flags);
vma               952 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		i915_vma_unpin(vma);
vma               964 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		vma = i915_vma_instance(explode, vm, NULL);
vma               965 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (IS_ERR(vma)) {
vma               966 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			err = PTR_ERR(vma);
vma               970 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		err = i915_vma_pin(vma, 0, 0, flags | size);
vma               974 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		i915_vma_unpin(vma);
vma              1230 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c static void track_vma_bind(struct i915_vma *vma)
vma              1232 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	struct drm_i915_gem_object *obj = vma->obj;
vma              1237 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	vma->pages = obj->mm.pages;
vma              1239 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	mutex_lock(&vma->vm->mutex);
vma              1240 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
vma              1241 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	mutex_unlock(&vma->vm->mutex);
vma              1310 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		struct i915_vma *vma;
vma              1327 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
vma              1328 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (IS_ERR(vma)) {
vma              1329 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			err = PTR_ERR(vma);
vma              1333 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
vma              1343 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		track_vma_bind(vma);
vma              1345 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
vma              1346 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (vma->node.start != total ||
vma              1347 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
vma              1349 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			       vma->node.start, vma->node.size,
vma              1360 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		struct i915_vma *vma;
vma              1377 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
vma              1378 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (IS_ERR(vma)) {
vma              1379 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			err = PTR_ERR(vma);
vma              1383 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
vma              1393 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		track_vma_bind(vma);
vma              1395 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
vma              1396 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (vma->node.start != total ||
vma              1397 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
vma              1399 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			       vma->node.start, vma->node.size,
vma              1408 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		struct i915_vma *vma;
vma              1411 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
vma              1412 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (IS_ERR(vma)) {
vma              1413 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			err = PTR_ERR(vma);
vma              1417 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		err = i915_vma_unbind(vma);
vma              1427 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
vma              1437 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		track_vma_bind(vma);
vma              1439 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
vma              1440 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (vma->node.start != offset ||
vma              1441 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
vma              1443 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			       vma->node.start, vma->node.size,
vma              1517 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		struct i915_vma *vma;
vma              1534 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
vma              1535 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (IS_ERR(vma)) {
vma              1536 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			err = PTR_ERR(vma);
vma              1540 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
vma              1554 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		track_vma_bind(vma);
vma              1555 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		__i915_vma_pin(vma);
vma              1557 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
vma              1561 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		struct i915_vma *vma;
vma              1563 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
vma              1564 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (IS_ERR(vma)) {
vma              1565 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			err = PTR_ERR(vma);
vma              1569 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (!drm_mm_node_allocated(&vma->node)) {
vma              1575 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		__i915_vma_unpin(vma);
vma              1580 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		struct i915_vma *vma;
vma              1583 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
vma              1584 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (IS_ERR(vma)) {
vma              1585 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			err = PTR_ERR(vma);
vma              1589 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
vma              1590 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		offset = vma->node.start;
vma              1592 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		err = i915_vma_unbind(vma);
vma              1598 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
vma              1607 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		track_vma_bind(vma);
vma              1609 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
vma              1610 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (vma->node.start != offset) {
vma              1612 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			       offset, vma->node.start);
vma              1622 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		struct i915_vma *vma;
vma              1639 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
vma              1640 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		if (IS_ERR(vma)) {
vma              1641 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			err = PTR_ERR(vma);
vma              1645 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
vma              1654 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		track_vma_bind(vma);
vma              1656 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
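The i915_gem_gtt.c selftests above exercise two distinct placement paths. A hedged sketch of the difference; the argument lists follow the i915 prototypes of this era, and anything beyond the calls visible above is an assumption:

	/* place a node at an exact, caller-chosen GTT offset */
	err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
				   obj->base.size, offset,
				   obj->cache_level, 0);

	/* or let the allocator search a range for a suitable hole */
	err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
				  obj->base.size, 0 /* alignment */,
				  obj->cache_level,
				  0, ggtt->vm.total, 0);

	if (!err)
		track_vma_bind(vma);	/* selftest-local bookkeeping, as above */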
vma                23 drivers/gpu/drm/i915/selftests/i915_live_selftests.h selftest(vma, i915_vma_live_selftests)
vma                23 drivers/gpu/drm/i915/selftests/i915_mock_selftests.h selftest(vma, i915_vma_mock_selftests)
vma               619 drivers/gpu/drm/i915/selftests/i915_request.c 	struct i915_vma *vma;
vma               640 drivers/gpu/drm/i915/selftests/i915_request.c 	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
vma               641 drivers/gpu/drm/i915/selftests/i915_request.c 	if (IS_ERR(vma)) {
vma               642 drivers/gpu/drm/i915/selftests/i915_request.c 		err = PTR_ERR(vma);
vma               646 drivers/gpu/drm/i915/selftests/i915_request.c 	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
vma               650 drivers/gpu/drm/i915/selftests/i915_request.c 	return vma;
vma               767 drivers/gpu/drm/i915/selftests/i915_request.c 	struct i915_vma *vma;
vma               775 drivers/gpu/drm/i915/selftests/i915_request.c 	vma = i915_vma_instance(obj, vm, NULL);
vma               776 drivers/gpu/drm/i915/selftests/i915_request.c 	if (IS_ERR(vma)) {
vma               777 drivers/gpu/drm/i915/selftests/i915_request.c 		err = PTR_ERR(vma);
vma               781 drivers/gpu/drm/i915/selftests/i915_request.c 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
vma               793 drivers/gpu/drm/i915/selftests/i915_request.c 		*cmd++ = lower_32_bits(vma->node.start);
vma               794 drivers/gpu/drm/i915/selftests/i915_request.c 		*cmd++ = upper_32_bits(vma->node.start);
vma               797 drivers/gpu/drm/i915/selftests/i915_request.c 		*cmd++ = lower_32_bits(vma->node.start);
vma               800 drivers/gpu/drm/i915/selftests/i915_request.c 		*cmd++ = lower_32_bits(vma->node.start);
vma               809 drivers/gpu/drm/i915/selftests/i915_request.c 	return vma;
vma                35 drivers/gpu/drm/i915/selftests/i915_vma.c static bool assert_vma(struct i915_vma *vma,
vma                41 drivers/gpu/drm/i915/selftests/i915_vma.c 	if (vma->vm != ctx->vm) {
vma                46 drivers/gpu/drm/i915/selftests/i915_vma.c 	if (vma->size != obj->base.size) {
vma                48 drivers/gpu/drm/i915/selftests/i915_vma.c 		       vma->size, obj->base.size);
vma                52 drivers/gpu/drm/i915/selftests/i915_vma.c 	if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) {
vma                54 drivers/gpu/drm/i915/selftests/i915_vma.c 		       vma->ggtt_view.type);
vma                66 drivers/gpu/drm/i915/selftests/i915_vma.c 	struct i915_vma *vma;
vma                69 drivers/gpu/drm/i915/selftests/i915_vma.c 	vma = i915_vma_instance(obj, vm, view);
vma                70 drivers/gpu/drm/i915/selftests/i915_vma.c 	if (IS_ERR(vma))
vma                71 drivers/gpu/drm/i915/selftests/i915_vma.c 		return vma;
vma                74 drivers/gpu/drm/i915/selftests/i915_vma.c 	if (vma->vm != vm) {
vma                76 drivers/gpu/drm/i915/selftests/i915_vma.c 		       vma->vm, vm);
vma                80 drivers/gpu/drm/i915/selftests/i915_vma.c 	if (i915_is_ggtt(vm) != i915_vma_is_ggtt(vma)) {
vma                82 drivers/gpu/drm/i915/selftests/i915_vma.c 		       i915_vma_is_ggtt(vma), i915_is_ggtt(vm));
vma                86 drivers/gpu/drm/i915/selftests/i915_vma.c 	if (i915_vma_compare(vma, vm, view)) {
vma                91 drivers/gpu/drm/i915/selftests/i915_vma.c 	if (i915_vma_compare(vma, vma->vm,
vma                92 drivers/gpu/drm/i915/selftests/i915_vma.c 			     i915_vma_is_ggtt(vma) ? &vma->ggtt_view : NULL)) {
vma               102 drivers/gpu/drm/i915/selftests/i915_vma.c 	return vma;
vma               117 drivers/gpu/drm/i915/selftests/i915_vma.c 				struct i915_vma *vma;
vma               120 drivers/gpu/drm/i915/selftests/i915_vma.c 				vma = checked_vma_instance(obj, vm, NULL);
vma               121 drivers/gpu/drm/i915/selftests/i915_vma.c 				if (IS_ERR(vma))
vma               122 drivers/gpu/drm/i915/selftests/i915_vma.c 					return PTR_ERR(vma);
vma               124 drivers/gpu/drm/i915/selftests/i915_vma.c 				if (!assert_vma(vma, obj, ctx)) {
vma               130 drivers/gpu/drm/i915/selftests/i915_vma.c 					err = i915_vma_pin(vma, 0, 0, PIN_USER);
vma               136 drivers/gpu/drm/i915/selftests/i915_vma.c 					i915_vma_unpin(vma);
vma               223 drivers/gpu/drm/i915/selftests/i915_vma.c static bool assert_pin_valid(const struct i915_vma *vma,
vma               230 drivers/gpu/drm/i915/selftests/i915_vma.c 	if (i915_vma_misplaced(vma, mode->size, 0, mode->flags))
vma               237 drivers/gpu/drm/i915/selftests/i915_vma.c static bool assert_pin_enospc(const struct i915_vma *vma,
vma               245 drivers/gpu/drm/i915/selftests/i915_vma.c static bool assert_pin_einval(const struct i915_vma *vma,
vma               308 drivers/gpu/drm/i915/selftests/i915_vma.c 	struct i915_vma *vma;
vma               321 drivers/gpu/drm/i915/selftests/i915_vma.c 	vma = checked_vma_instance(obj, &ggtt->vm, NULL);
vma               322 drivers/gpu/drm/i915/selftests/i915_vma.c 	if (IS_ERR(vma))
vma               326 drivers/gpu/drm/i915/selftests/i915_vma.c 		err = i915_vma_pin(vma, m->size, 0, m->flags);
vma               327 drivers/gpu/drm/i915/selftests/i915_vma.c 		if (!m->assert(vma, m, err)) {
vma               333 drivers/gpu/drm/i915/selftests/i915_vma.c 				i915_vma_unpin(vma);
vma               339 drivers/gpu/drm/i915/selftests/i915_vma.c 			i915_vma_unpin(vma);
vma               340 drivers/gpu/drm/i915/selftests/i915_vma.c 			err = i915_vma_unbind(vma);
vma               530 drivers/gpu/drm/i915/selftests/i915_vma.c 					struct i915_vma *vma;
vma               532 drivers/gpu/drm/i915/selftests/i915_vma.c 					vma = checked_vma_instance(obj, vm, &view);
vma               533 drivers/gpu/drm/i915/selftests/i915_vma.c 					if (IS_ERR(vma)) {
vma               534 drivers/gpu/drm/i915/selftests/i915_vma.c 						err = PTR_ERR(vma);
vma               538 drivers/gpu/drm/i915/selftests/i915_vma.c 					err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
vma               545 drivers/gpu/drm/i915/selftests/i915_vma.c 					    vma->size != rotated_size(a, b) * PAGE_SIZE) {
vma               547 drivers/gpu/drm/i915/selftests/i915_vma.c 						       PAGE_SIZE * rotated_size(a, b), vma->size);
vma               553 drivers/gpu/drm/i915/selftests/i915_vma.c 					    vma->size > rotated_size(a, b) * PAGE_SIZE) {
vma               555 drivers/gpu/drm/i915/selftests/i915_vma.c 						       PAGE_SIZE * rotated_size(a, b), vma->size);
vma               560 drivers/gpu/drm/i915/selftests/i915_vma.c 					if (vma->pages->nents > rotated_size(a, b)) {
vma               562 drivers/gpu/drm/i915/selftests/i915_vma.c 						       rotated_size(a, b), vma->pages->nents);
vma               567 drivers/gpu/drm/i915/selftests/i915_vma.c 					if (vma->node.size < vma->size) {
vma               569 drivers/gpu/drm/i915/selftests/i915_vma.c 						       vma->size, vma->node.size);
vma               574 drivers/gpu/drm/i915/selftests/i915_vma.c 					if (vma->pages == obj->mm.pages) {
vma               580 drivers/gpu/drm/i915/selftests/i915_vma.c 					sg = vma->pages->sgl;
vma               603 drivers/gpu/drm/i915/selftests/i915_vma.c 					i915_vma_unpin(vma);
vma               619 drivers/gpu/drm/i915/selftests/i915_vma.c 			   struct i915_vma *vma,
vma               626 drivers/gpu/drm/i915/selftests/i915_vma.c 	for_each_sgt_dma(dma, sgt, vma->pages) {
vma               648 drivers/gpu/drm/i915/selftests/i915_vma.c static bool assert_pin(struct i915_vma *vma,
vma               655 drivers/gpu/drm/i915/selftests/i915_vma.c 	if (vma->size != size) {
vma               657 drivers/gpu/drm/i915/selftests/i915_vma.c 		       name, size, vma->size);
vma               661 drivers/gpu/drm/i915/selftests/i915_vma.c 	if (vma->node.size < vma->size) {
vma               663 drivers/gpu/drm/i915/selftests/i915_vma.c 		       name, vma->size, vma->node.size);
vma               668 drivers/gpu/drm/i915/selftests/i915_vma.c 		if (memcmp(&vma->ggtt_view, view, sizeof(*view))) {
vma               674 drivers/gpu/drm/i915/selftests/i915_vma.c 		if (vma->pages == vma->obj->mm.pages) {
vma               680 drivers/gpu/drm/i915/selftests/i915_vma.c 		if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) {
vma               682 drivers/gpu/drm/i915/selftests/i915_vma.c 			       vma->ggtt_view.type);
vma               686 drivers/gpu/drm/i915/selftests/i915_vma.c 		if (vma->pages != vma->obj->mm.pages) {
vma               709 drivers/gpu/drm/i915/selftests/i915_vma.c 	struct i915_vma *vma;
vma               735 drivers/gpu/drm/i915/selftests/i915_vma.c 				vma = checked_vma_instance(obj, vm, &view);
vma               736 drivers/gpu/drm/i915/selftests/i915_vma.c 				if (IS_ERR(vma)) {
vma               737 drivers/gpu/drm/i915/selftests/i915_vma.c 					err = PTR_ERR(vma);
vma               741 drivers/gpu/drm/i915/selftests/i915_vma.c 				err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
vma               745 drivers/gpu/drm/i915/selftests/i915_vma.c 				if (!assert_pin(vma, &view, sz*PAGE_SIZE, p->name)) {
vma               752 drivers/gpu/drm/i915/selftests/i915_vma.c 				if (!assert_partial(obj, vma, offset, sz)) {
vma               759 drivers/gpu/drm/i915/selftests/i915_vma.c 				i915_vma_unpin(vma);
vma               767 drivers/gpu/drm/i915/selftests/i915_vma.c 		list_for_each_entry(vma, &obj->vma.list, obj_link)
vma               777 drivers/gpu/drm/i915/selftests/i915_vma.c 		vma = checked_vma_instance(obj, vm, NULL);
vma               778 drivers/gpu/drm/i915/selftests/i915_vma.c 		if (IS_ERR(vma)) {
vma               779 drivers/gpu/drm/i915/selftests/i915_vma.c 			err = PTR_ERR(vma);
vma               783 drivers/gpu/drm/i915/selftests/i915_vma.c 		err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
vma               787 drivers/gpu/drm/i915/selftests/i915_vma.c 		if (!assert_pin(vma, NULL, obj->base.size, p->name)) {
vma               793 drivers/gpu/drm/i915/selftests/i915_vma.c 		i915_vma_unpin(vma);
vma               796 drivers/gpu/drm/i915/selftests/i915_vma.c 		list_for_each_entry(vma, &obj->vma.list, obj_link)
vma               892 drivers/gpu/drm/i915/selftests/i915_vma.c 			struct i915_vma *vma;
vma               903 drivers/gpu/drm/i915/selftests/i915_vma.c 			vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
vma               904 drivers/gpu/drm/i915/selftests/i915_vma.c 			if (IS_ERR(vma)) {
vma               905 drivers/gpu/drm/i915/selftests/i915_vma.c 				err = PTR_ERR(vma);
vma               909 drivers/gpu/drm/i915/selftests/i915_vma.c 			GEM_BUG_ON(vma->ggtt_view.type != *t);
vma               911 drivers/gpu/drm/i915/selftests/i915_vma.c 			map = i915_vma_pin_iomap(vma);
vma               912 drivers/gpu/drm/i915/selftests/i915_vma.c 			i915_vma_unpin(vma);
vma               932 drivers/gpu/drm/i915/selftests/i915_vma.c 			i915_vma_unpin_iomap(vma);
vma               934 drivers/gpu/drm/i915/selftests/i915_vma.c 			vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
vma               935 drivers/gpu/drm/i915/selftests/i915_vma.c 			if (IS_ERR(vma)) {
vma               936 drivers/gpu/drm/i915/selftests/i915_vma.c 				err = PTR_ERR(vma);
vma               940 drivers/gpu/drm/i915/selftests/i915_vma.c 			GEM_BUG_ON(vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL);
vma               942 drivers/gpu/drm/i915/selftests/i915_vma.c 			map = i915_vma_pin_iomap(vma);
vma               943 drivers/gpu/drm/i915/selftests/i915_vma.c 			i915_vma_unpin(vma);
vma               966 drivers/gpu/drm/i915/selftests/i915_vma.c 						i915_vma_unpin_iomap(vma);
vma               971 drivers/gpu/drm/i915/selftests/i915_vma.c 			i915_vma_unpin_iomap(vma);
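The iomap portion of the selftests/i915_vma.c entries above relies on i915_vma_pin_iomap() taking its own pin on success, which is why the test can drop its explicit pin immediately after mapping. A condensed sketch of that pattern, assuming a mappable GGTT object; the written value is illustrative:

	static int example_iomap_write(struct drm_i915_gem_object *obj)
	{
		struct i915_vma *vma;
		void __iomem *map;

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		map = i915_vma_pin_iomap(vma);	/* takes its own pin on success */
		i915_vma_unpin(vma);		/* safe: the iomap keeps the vma bound */
		if (IS_ERR(map))
			return PTR_ERR(map);

		writel(0xc0ffee, map);		/* write through the GGTT aperture */
		i915_vma_unpin_iomap(vma);	/* drops the iomap's pin */
		return 0;
	}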
vma                74 drivers/gpu/drm/i915/selftests/igt_spinner.c static int move_to_active(struct i915_vma *vma,
vma                80 drivers/gpu/drm/i915/selftests/igt_spinner.c 	i915_vma_lock(vma);
vma                81 drivers/gpu/drm/i915/selftests/igt_spinner.c 	err = i915_request_await_object(rq, vma->obj,
vma                84 drivers/gpu/drm/i915/selftests/igt_spinner.c 		err = i915_vma_move_to_active(vma, rq, flags);
vma                85 drivers/gpu/drm/i915/selftests/igt_spinner.c 	i915_vma_unlock(vma);
vma                97 drivers/gpu/drm/i915/selftests/igt_spinner.c 	struct i915_vma *hws, *vma;
vma               103 drivers/gpu/drm/i915/selftests/igt_spinner.c 	vma = i915_vma_instance(spin->obj, ce->vm, NULL);
vma               104 drivers/gpu/drm/i915/selftests/igt_spinner.c 	if (IS_ERR(vma))
vma               105 drivers/gpu/drm/i915/selftests/igt_spinner.c 		return ERR_CAST(vma);
vma               111 drivers/gpu/drm/i915/selftests/igt_spinner.c 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
vma               125 drivers/gpu/drm/i915/selftests/igt_spinner.c 	err = move_to_active(vma, rq, 0);
vma               143 drivers/gpu/drm/i915/selftests/igt_spinner.c 	*batch++ = lower_32_bits(vma->node.start);
vma               144 drivers/gpu/drm/i915/selftests/igt_spinner.c 	*batch++ = upper_32_bits(vma->node.start);
vma               156 drivers/gpu/drm/i915/selftests/igt_spinner.c 	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
vma               166 drivers/gpu/drm/i915/selftests/igt_spinner.c 	i915_vma_unpin(vma);
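move_to_active() in igt_spinner.c above captures the standard i915 idiom for publishing a request's use of a vma: take the object's reservation lock, order the request behind fences already on the object, then mark the vma active. Condensed from the entries above; the read-only flag shown is an assumption:

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, false /* read-only */);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);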
vma                36 drivers/gpu/drm/i915/selftests/mock_gtt.c 				struct i915_vma *vma,
vma                41 drivers/gpu/drm/i915/selftests/mock_gtt.c static int mock_bind_ppgtt(struct i915_vma *vma,
vma                46 drivers/gpu/drm/i915/selftests/mock_gtt.c 	vma->flags |= I915_VMA_LOCAL_BIND;
vma                50 drivers/gpu/drm/i915/selftests/mock_gtt.c static void mock_unbind_ppgtt(struct i915_vma *vma)
vma                85 drivers/gpu/drm/i915/selftests/mock_gtt.c static int mock_bind_ggtt(struct i915_vma *vma,
vma                89 drivers/gpu/drm/i915/selftests/mock_gtt.c 	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
vma                93 drivers/gpu/drm/i915/selftests/mock_gtt.c static void mock_unbind_ggtt(struct i915_vma *vma)
vma                93 drivers/gpu/drm/lima/lima_gem.c 	struct vm_area_struct *vma = vmf->vma;
vma                94 drivers/gpu/drm/lima/lima_gem.c 	struct drm_gem_object *obj = vma->vm_private_data;
vma               100 drivers/gpu/drm/lima/lima_gem.c 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
vma               103 drivers/gpu/drm/lima/lima_gem.c 	return vmf_insert_mixed(vma, vmf->address, pfn);
vma               112 drivers/gpu/drm/lima/lima_gem.c void lima_set_vma_flags(struct vm_area_struct *vma)
vma               114 drivers/gpu/drm/lima/lima_gem.c 	pgprot_t prot = vm_get_page_prot(vma->vm_flags);
vma               116 drivers/gpu/drm/lima/lima_gem.c 	vma->vm_flags |= VM_MIXEDMAP;
vma               117 drivers/gpu/drm/lima/lima_gem.c 	vma->vm_flags &= ~VM_PFNMAP;
vma               118 drivers/gpu/drm/lima/lima_gem.c 	vma->vm_page_prot = pgprot_writecombine(prot);
vma               121 drivers/gpu/drm/lima/lima_gem.c int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma)
vma               125 drivers/gpu/drm/lima/lima_gem.c 	ret = drm_gem_mmap(filp, vma);
vma               129 drivers/gpu/drm/lima/lima_gem.c 	lima_set_vma_flags(vma);
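For the lima entries above: vmf_insert_mixed() requires VM_MIXEDMAP, which is why lima_set_vma_flags() clears VM_PFNMAP, sets VM_MIXEDMAP and switches the mapping to write-combine before any fault is taken. A hedged sketch of the matching fault handler; the to_lima_bo() helper and the bo->pages backing array are assumptions for illustration:

	static vm_fault_t example_gem_fault(struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;
		struct drm_gem_object *obj = vma->vm_private_data;
		struct lima_bo *bo = to_lima_bo(obj);	/* assumed helper */
		pgoff_t pgoff;
		pfn_t pfn;

		/* offset of the faulting page within the mapping */
		pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
		pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);

		return vmf_insert_mixed(vma, vmf->address, pfn);
	}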
vma                19 drivers/gpu/drm/lima/lima_gem.h int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma);
vma                23 drivers/gpu/drm/lima/lima_gem.h void lima_set_vma_flags(struct vm_area_struct *vma);
vma                36 drivers/gpu/drm/lima/lima_gem_prime.c int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
vma                40 drivers/gpu/drm/lima/lima_gem_prime.c 	ret = drm_gem_mmap_obj(obj, obj->size, vma);
vma                44 drivers/gpu/drm/lima/lima_gem_prime.c 	lima_set_vma_flags(vma);
vma                11 drivers/gpu/drm/lima/lima_gem_prime.h int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
vma               130 drivers/gpu/drm/mediatek/mtk_drm_gem.c 				   struct vm_area_struct *vma)
vma               141 drivers/gpu/drm/mediatek/mtk_drm_gem.c 	vma->vm_flags &= ~VM_PFNMAP;
vma               143 drivers/gpu/drm/mediatek/mtk_drm_gem.c 	ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
vma               146 drivers/gpu/drm/mediatek/mtk_drm_gem.c 		drm_gem_vm_close(vma);
vma               151 drivers/gpu/drm/mediatek/mtk_drm_gem.c int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj, struct vm_area_struct *vma)
vma               155 drivers/gpu/drm/mediatek/mtk_drm_gem.c 	ret = drm_gem_mmap_obj(obj, obj->size, vma);
vma               159 drivers/gpu/drm/mediatek/mtk_drm_gem.c 	return mtk_drm_gem_object_mmap(obj, vma);
vma               162 drivers/gpu/drm/mediatek/mtk_drm_gem.c int mtk_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
vma               167 drivers/gpu/drm/mediatek/mtk_drm_gem.c 	ret = drm_gem_mmap(filp, vma);
vma               171 drivers/gpu/drm/mediatek/mtk_drm_gem.c 	obj = vma->vm_private_data;
vma               177 drivers/gpu/drm/mediatek/mtk_drm_gem.c 	vma->vm_pgoff = 0;
vma               179 drivers/gpu/drm/mediatek/mtk_drm_gem.c 	return mtk_drm_gem_object_mmap(obj, vma);
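A note on the mtk_drm_gem.c flow above: drm_gem_mmap() resolves the object from the fake offset userspace passed in vm_pgoff, so the driver must zero vm_pgoff before handing the VMA to dma_mmap_attrs(), which would otherwise read it as an offset into the DMA buffer. Condensed from the entries above:

	ret = drm_gem_mmap(filp, vma);	/* sets vm_private_data to the object */
	if (ret)
		return ret;

	obj = vma->vm_private_data;
	vma->vm_pgoff = 0;		/* drop the fake mmap offset */

	return mtk_drm_gem_object_mmap(obj, vma);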
vma                42 drivers/gpu/drm/mediatek/mtk_drm_gem.h int mtk_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
vma                44 drivers/gpu/drm/mediatek/mtk_drm_gem.h 			 struct vm_area_struct *vma);
vma               223 drivers/gpu/drm/mgag200/mgag200_drv.h int mgag200_mmap(struct file *filp, struct vm_area_struct *vma);
vma               236 drivers/gpu/drm/msm/msm_drv.h 		struct msm_gem_vma *vma, int npages);
vma               238 drivers/gpu/drm/msm/msm_drv.h 		struct msm_gem_vma *vma);
vma               240 drivers/gpu/drm/msm/msm_drv.h 		struct msm_gem_vma *vma);
vma               242 drivers/gpu/drm/msm/msm_drv.h 		struct msm_gem_vma *vma, int prot,
vma               245 drivers/gpu/drm/msm/msm_drv.h 		struct msm_gem_vma *vma);
vma               270 drivers/gpu/drm/msm/msm_drv.h 			struct vm_area_struct *vma);
vma               271 drivers/gpu/drm/msm/msm_drv.h int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
vma               291 drivers/gpu/drm/msm/msm_drv.h int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
vma                15 drivers/gpu/drm/msm/msm_fbdev.c 					struct vm_area_struct *vma);
vma                16 drivers/gpu/drm/msm/msm_fbdev.c static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma);
vma                44 drivers/gpu/drm/msm/msm_fbdev.c static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma                51 drivers/gpu/drm/msm/msm_fbdev.c 	ret = drm_gem_mmap_obj(bo, bo->size, vma);
vma                57 drivers/gpu/drm/msm/msm_fbdev.c 	return msm_gem_mmap_obj(bo, vma);
vma               209 drivers/gpu/drm/msm/msm_gem.c 		struct vm_area_struct *vma)
vma               213 drivers/gpu/drm/msm/msm_gem.c 	vma->vm_flags &= ~VM_PFNMAP;
vma               214 drivers/gpu/drm/msm/msm_gem.c 	vma->vm_flags |= VM_MIXEDMAP;
vma               217 drivers/gpu/drm/msm/msm_gem.c 		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma               219 drivers/gpu/drm/msm/msm_gem.c 		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
vma               226 drivers/gpu/drm/msm/msm_gem.c 		fput(vma->vm_file);
vma               228 drivers/gpu/drm/msm/msm_gem.c 		vma->vm_pgoff = 0;
vma               229 drivers/gpu/drm/msm/msm_gem.c 		vma->vm_file  = obj->filp;
vma               231 drivers/gpu/drm/msm/msm_gem.c 		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma               237 drivers/gpu/drm/msm/msm_gem.c int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
vma               241 drivers/gpu/drm/msm/msm_gem.c 	ret = drm_gem_mmap(filp, vma);
vma               247 drivers/gpu/drm/msm/msm_gem.c 	return msm_gem_mmap_obj(vma->vm_private_data, vma);
vma               252 drivers/gpu/drm/msm/msm_gem.c 	struct vm_area_struct *vma = vmf->vma;
vma               253 drivers/gpu/drm/msm/msm_gem.c 	struct drm_gem_object *obj = vma->vm_private_data;
vma               284 drivers/gpu/drm/msm/msm_gem.c 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
vma               291 drivers/gpu/drm/msm/msm_gem.c 	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
vma               333 drivers/gpu/drm/msm/msm_gem.c 	struct msm_gem_vma *vma;
vma               337 drivers/gpu/drm/msm/msm_gem.c 	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
vma               338 drivers/gpu/drm/msm/msm_gem.c 	if (!vma)
vma               341 drivers/gpu/drm/msm/msm_gem.c 	vma->aspace = aspace;
vma               343 drivers/gpu/drm/msm/msm_gem.c 	list_add_tail(&vma->list, &msm_obj->vmas);
vma               345 drivers/gpu/drm/msm/msm_gem.c 	return vma;
vma               352 drivers/gpu/drm/msm/msm_gem.c 	struct msm_gem_vma *vma;
vma               356 drivers/gpu/drm/msm/msm_gem.c 	list_for_each_entry(vma, &msm_obj->vmas, list) {
vma               357 drivers/gpu/drm/msm/msm_gem.c 		if (vma->aspace == aspace)
vma               358 drivers/gpu/drm/msm/msm_gem.c 			return vma;
vma               364 drivers/gpu/drm/msm/msm_gem.c static void del_vma(struct msm_gem_vma *vma)
vma               366 drivers/gpu/drm/msm/msm_gem.c 	if (!vma)
vma               369 drivers/gpu/drm/msm/msm_gem.c 	list_del(&vma->list);
vma               370 drivers/gpu/drm/msm/msm_gem.c 	kfree(vma);
vma               378 drivers/gpu/drm/msm/msm_gem.c 	struct msm_gem_vma *vma, *tmp;
vma               382 drivers/gpu/drm/msm/msm_gem.c 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
vma               383 drivers/gpu/drm/msm/msm_gem.c 		if (vma->aspace) {
vma               384 drivers/gpu/drm/msm/msm_gem.c 			msm_gem_purge_vma(vma->aspace, vma);
vma               385 drivers/gpu/drm/msm/msm_gem.c 			msm_gem_close_vma(vma->aspace, vma);
vma               387 drivers/gpu/drm/msm/msm_gem.c 		del_vma(vma);
vma               395 drivers/gpu/drm/msm/msm_gem.c 	struct msm_gem_vma *vma;
vma               400 drivers/gpu/drm/msm/msm_gem.c 	vma = lookup_vma(obj, aspace);
vma               402 drivers/gpu/drm/msm/msm_gem.c 	if (!vma) {
vma               403 drivers/gpu/drm/msm/msm_gem.c 		vma = add_vma(obj, aspace);
vma               404 drivers/gpu/drm/msm/msm_gem.c 		if (IS_ERR(vma))
vma               405 drivers/gpu/drm/msm/msm_gem.c 			return PTR_ERR(vma);
vma               407 drivers/gpu/drm/msm/msm_gem.c 		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
vma               409 drivers/gpu/drm/msm/msm_gem.c 			del_vma(vma);
vma               414 drivers/gpu/drm/msm/msm_gem.c 	*iova = vma->iova;
vma               422 drivers/gpu/drm/msm/msm_gem.c 	struct msm_gem_vma *vma;
vma               434 drivers/gpu/drm/msm/msm_gem.c 	vma = lookup_vma(obj, aspace);
vma               435 drivers/gpu/drm/msm/msm_gem.c 	if (WARN_ON(!vma))
vma               442 drivers/gpu/drm/msm/msm_gem.c 	return msm_gem_map_vma(aspace, vma, prot,
vma               492 drivers/gpu/drm/msm/msm_gem.c 	struct msm_gem_vma *vma;
vma               495 drivers/gpu/drm/msm/msm_gem.c 	vma = lookup_vma(obj, aspace);
vma               497 drivers/gpu/drm/msm/msm_gem.c 	WARN_ON(!vma);
vma               499 drivers/gpu/drm/msm/msm_gem.c 	return vma ? vma->iova : 0;
vma               511 drivers/gpu/drm/msm/msm_gem.c 	struct msm_gem_vma *vma;
vma               514 drivers/gpu/drm/msm/msm_gem.c 	vma = lookup_vma(obj, aspace);
vma               516 drivers/gpu/drm/msm/msm_gem.c 	if (!WARN_ON(!vma))
vma               517 drivers/gpu/drm/msm/msm_gem.c 		msm_gem_unmap_vma(aspace, vma);
vma               805 drivers/gpu/drm/msm/msm_gem.c 	struct msm_gem_vma *vma;
vma               835 drivers/gpu/drm/msm/msm_gem.c 		list_for_each_entry(vma, &msm_obj->vmas, list)
vma               837 drivers/gpu/drm/msm/msm_gem.c 				vma->aspace != NULL ? vma->aspace->name : NULL,
vma               838 drivers/gpu/drm/msm/msm_gem.c 				vma->iova, vma->mapped ? "mapped" : "unmapped",
vma               839 drivers/gpu/drm/msm/msm_gem.c 				vma->inuse);
vma              1052 drivers/gpu/drm/msm/msm_gem.c 		struct msm_gem_vma *vma;
vma              1058 drivers/gpu/drm/msm/msm_gem.c 		vma = add_vma(obj, NULL);
vma              1060 drivers/gpu/drm/msm/msm_gem.c 		if (IS_ERR(vma)) {
vma              1061 drivers/gpu/drm/msm/msm_gem.c 			ret = PTR_ERR(vma);
vma              1065 drivers/gpu/drm/msm/msm_gem.c 		to_msm_bo(obj)->vram_node = &vma->node;
vma              1075 drivers/gpu/drm/msm/msm_gem.c 		vma->iova = physaddr(obj);
vma                35 drivers/gpu/drm/msm/msm_gem_prime.c int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
vma                39 drivers/gpu/drm/msm/msm_gem_prime.c 	ret = drm_gem_mmap_obj(obj, obj->size, vma);
vma                43 drivers/gpu/drm/msm/msm_gem_prime.c 	return msm_gem_mmap_obj(vma->vm_private_data, vma);
vma                32 drivers/gpu/drm/msm/msm_gem_vma.c 		struct msm_gem_vma *vma)
vma                34 drivers/gpu/drm/msm/msm_gem_vma.c 	unsigned size = vma->node.size << PAGE_SHIFT;
vma                37 drivers/gpu/drm/msm/msm_gem_vma.c 	if (WARN_ON(vma->inuse > 0))
vma                41 drivers/gpu/drm/msm/msm_gem_vma.c 	if (!vma->mapped)
vma                45 drivers/gpu/drm/msm/msm_gem_vma.c 		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
vma                47 drivers/gpu/drm/msm/msm_gem_vma.c 	vma->mapped = false;
vma                52 drivers/gpu/drm/msm/msm_gem_vma.c 		struct msm_gem_vma *vma)
vma                54 drivers/gpu/drm/msm/msm_gem_vma.c 	if (!WARN_ON(!vma->iova))
vma                55 drivers/gpu/drm/msm/msm_gem_vma.c 		vma->inuse--;
vma                60 drivers/gpu/drm/msm/msm_gem_vma.c 		struct msm_gem_vma *vma, int prot,
vma                66 drivers/gpu/drm/msm/msm_gem_vma.c 	if (WARN_ON(!vma->iova))
vma                70 drivers/gpu/drm/msm/msm_gem_vma.c 	vma->inuse++;
vma                72 drivers/gpu/drm/msm/msm_gem_vma.c 	if (vma->mapped)
vma                75 drivers/gpu/drm/msm/msm_gem_vma.c 	vma->mapped = true;
vma                78 drivers/gpu/drm/msm/msm_gem_vma.c 		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
vma                82 drivers/gpu/drm/msm/msm_gem_vma.c 		vma->mapped = false;
vma                89 drivers/gpu/drm/msm/msm_gem_vma.c 		struct msm_gem_vma *vma)
vma                91 drivers/gpu/drm/msm/msm_gem_vma.c 	if (WARN_ON(vma->inuse > 0 || vma->mapped))
vma                95 drivers/gpu/drm/msm/msm_gem_vma.c 	if (vma->iova)
vma                96 drivers/gpu/drm/msm/msm_gem_vma.c 		drm_mm_remove_node(&vma->node);
vma                99 drivers/gpu/drm/msm/msm_gem_vma.c 	vma->iova = 0;
vma               106 drivers/gpu/drm/msm/msm_gem_vma.c 		struct msm_gem_vma *vma, int npages)
vma               110 drivers/gpu/drm/msm/msm_gem_vma.c 	if (WARN_ON(vma->iova))
vma               114 drivers/gpu/drm/msm/msm_gem_vma.c 	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
vma               120 drivers/gpu/drm/msm/msm_gem_vma.c 	vma->iova = vma->node.start << PAGE_SHIFT;
vma               121 drivers/gpu/drm/msm/msm_gem_vma.c 	vma->mapped = false;
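Taken together, the msm_gem_vma.c entries above define a small state machine over iova, mapped and inuse. A hedged sketch of the intended call order, with sgt, prot and npages assumed to come from the caller:

	ret = msm_gem_init_vma(aspace, vma, npages);	/* allocate the iova range */
	if (ret)
		return ret;

	ret = msm_gem_map_vma(aspace, vma, prot, sgt, npages);	/* inuse++, map */
	if (ret)
		return ret;

	msm_gem_unmap_vma(aspace, vma);	/* drop the inuse reference */
	msm_gem_purge_vma(aspace, vma);	/* MMU unmap, legal once inuse == 0 */
	msm_gem_close_vma(aspace, vma);	/* release the iova range */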
vma               758 drivers/gpu/drm/nouveau/nouveau_bo.c 		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
vma               759 drivers/gpu/drm/nouveau/nouveau_bo.c 		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
vma               760 drivers/gpu/drm/nouveau/nouveau_bo.c 		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
vma               761 drivers/gpu/drm/nouveau/nouveau_bo.c 		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
vma               787 drivers/gpu/drm/nouveau/nouveau_bo.c 	u64 src_offset = mem->vma[0].addr;
vma               788 drivers/gpu/drm/nouveau/nouveau_bo.c 	u64 dst_offset = mem->vma[1].addr;
vma               825 drivers/gpu/drm/nouveau/nouveau_bo.c 	u64 src_offset = mem->vma[0].addr;
vma               826 drivers/gpu/drm/nouveau/nouveau_bo.c 	u64 dst_offset = mem->vma[1].addr;
vma               864 drivers/gpu/drm/nouveau/nouveau_bo.c 	u64 src_offset = mem->vma[0].addr;
vma               865 drivers/gpu/drm/nouveau/nouveau_bo.c 	u64 dst_offset = mem->vma[1].addr;
vma               905 drivers/gpu/drm/nouveau/nouveau_bo.c 		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
vma               906 drivers/gpu/drm/nouveau/nouveau_bo.c 		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
vma               907 drivers/gpu/drm/nouveau/nouveau_bo.c 		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
vma               908 drivers/gpu/drm/nouveau/nouveau_bo.c 		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
vma               924 drivers/gpu/drm/nouveau/nouveau_bo.c 		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
vma               925 drivers/gpu/drm/nouveau/nouveau_bo.c 		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
vma               926 drivers/gpu/drm/nouveau/nouveau_bo.c 		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
vma               927 drivers/gpu/drm/nouveau/nouveau_bo.c 		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
vma               955 drivers/gpu/drm/nouveau/nouveau_bo.c 	u64 src_offset = mem->vma[0].addr;
vma               956 drivers/gpu/drm/nouveau/nouveau_bo.c 	u64 dst_offset = mem->vma[1].addr;
vma              1101 drivers/gpu/drm/nouveau/nouveau_bo.c 			   old_mem->mem.size, &old_mem->vma[0]);
vma              1106 drivers/gpu/drm/nouveau/nouveau_bo.c 			   new_mem->mem.size, &old_mem->vma[1]);
vma              1110 drivers/gpu/drm/nouveau/nouveau_bo.c 	ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
vma              1114 drivers/gpu/drm/nouveau/nouveau_bo.c 	ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
vma              1117 drivers/gpu/drm/nouveau/nouveau_bo.c 		nvif_vmm_put(vmm, &old_mem->vma[1]);
vma              1118 drivers/gpu/drm/nouveau/nouveau_bo.c 		nvif_vmm_put(vmm, &old_mem->vma[0]);
vma              1309 drivers/gpu/drm/nouveau/nouveau_bo.c 	struct nouveau_vma *vma;
vma              1317 drivers/gpu/drm/nouveau/nouveau_bo.c 		list_for_each_entry(vma, &nvbo->vma_list, head) {
vma              1318 drivers/gpu/drm/nouveau/nouveau_bo.c 			nouveau_vma_map(vma, mem);
vma              1321 drivers/gpu/drm/nouveau/nouveau_bo.c 		list_for_each_entry(vma, &nvbo->vma_list, head) {
vma              1323 drivers/gpu/drm/nouveau/nouveau_bo.c 			nouveau_vma_unmap(vma);
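nouveau_bo.c uses a pair of temporary GPU VAs for copy-engine moves: vma[0] addresses the source, vma[1] the destination, and each engine-specific hook just splits the two 64-bit addresses into ring words. The setup allocates both with nvif_vmm_get(), maps each with nouveau_mem_map(), and unwinds with nvif_vmm_put() in reverse order on failure. A sketch of that bracket; the LAZY allocation type and the mem.page/mem.size fields are assumptions carried over from the prototypes quoted elsewhere in this index:

	static int copy_vmas_get(struct nouveau_mem *old_mem,
				 struct nouveau_mem *new_mem,
				 struct nvif_vmm *vmm)
	{
		int ret;

		/* source window */
		ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
				   old_mem->mem.size, &old_mem->vma[0]);
		if (ret)
			return ret;

		/* destination window */
		ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
				   new_mem->mem.size, &old_mem->vma[1]);
		if (ret)
			goto put0;

		ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
		if (ret)
			goto put1;
		ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
		if (ret)
			goto put1;
		return 0;

	put1:
		nvif_vmm_put(vmm, &old_mem->vma[1]);
	put0:
		nvif_vmm_put(vmm, &old_mem->vma[0]);
		return ret;
	}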
vma               109 drivers/gpu/drm/nouveau/nouveau_chan.c 		nouveau_vma_del(&chan->push.vma);
vma               167 drivers/gpu/drm/nouveau/nouveau_chan.c 				      &chan->push.vma);
vma               173 drivers/gpu/drm/nouveau/nouveau_chan.c 		chan->push.addr = chan->push.vma->addr;
vma                23 drivers/gpu/drm/nouveau/nouveau_chan.h 		struct nouveau_vma *vma;
vma                14 drivers/gpu/drm/nouveau/nouveau_display.h 	struct nouveau_vma *vma;
vma               142 drivers/gpu/drm/nouveau/nouveau_dmem.c 	dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
vma               174 drivers/gpu/drm/nouveau/nouveau_dmem.c 		.vma		= vmf->vma,
vma               624 drivers/gpu/drm/nouveau/nouveau_dmem.c 			 struct vm_area_struct *vma,
vma               632 drivers/gpu/drm/nouveau/nouveau_dmem.c 		.vma		= vma,
vma                37 drivers/gpu/drm/nouveau/nouveau_dmem.h 			     struct vm_area_struct *vma,
vma               356 drivers/gpu/drm/nouveau/nouveau_fbcon.c 		ret = nouveau_vma_new(nvbo, chan->vmm, &fb->vma);
vma               403 drivers/gpu/drm/nouveau/nouveau_fbcon.c 		nouveau_vma_del(&fb->vma);
vma               422 drivers/gpu/drm/nouveau/nouveau_fbcon.c 		nouveau_vma_del(&nouveau_fb->vma);
vma                87 drivers/gpu/drm/nouveau/nouveau_fence.h 	struct nouveau_vma *vma;
vma                68 drivers/gpu/drm/nouveau/nouveau_gem.c 	struct nouveau_vma *vma;
vma                82 drivers/gpu/drm/nouveau/nouveau_gem.c 	ret = nouveau_vma_new(nvbo, vmm, &vma);
vma                92 drivers/gpu/drm/nouveau/nouveau_gem.c 	struct nouveau_vma *vma;
vma                96 drivers/gpu/drm/nouveau/nouveau_gem.c nouveau_gem_object_delete(struct nouveau_vma *vma)
vma                98 drivers/gpu/drm/nouveau/nouveau_gem.c 	nouveau_fence_unref(&vma->fence);
vma                99 drivers/gpu/drm/nouveau/nouveau_gem.c 	nouveau_vma_del(&vma);
vma               107 drivers/gpu/drm/nouveau/nouveau_gem.c 	nouveau_gem_object_delete(work->vma);
vma               112 drivers/gpu/drm/nouveau/nouveau_gem.c nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
vma               114 drivers/gpu/drm/nouveau/nouveau_gem.c 	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
vma               117 drivers/gpu/drm/nouveau/nouveau_gem.c 	list_del_init(&vma->head);
vma               120 drivers/gpu/drm/nouveau/nouveau_gem.c 		nouveau_gem_object_delete(vma);
vma               126 drivers/gpu/drm/nouveau/nouveau_gem.c 		nouveau_gem_object_delete(vma);
vma               131 drivers/gpu/drm/nouveau/nouveau_gem.c 	work->vma = vma;
vma               132 drivers/gpu/drm/nouveau/nouveau_gem.c 	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
vma               143 drivers/gpu/drm/nouveau/nouveau_gem.c 	struct nouveau_vma *vma;
vma               153 drivers/gpu/drm/nouveau/nouveau_gem.c 	vma = nouveau_vma_find(nvbo, vmm);
vma               154 drivers/gpu/drm/nouveau/nouveau_gem.c 	if (vma) {
vma               155 drivers/gpu/drm/nouveau/nouveau_gem.c 		if (--vma->refs == 0) {
vma               158 drivers/gpu/drm/nouveau/nouveau_gem.c 				nouveau_gem_object_unmap(nvbo, vma);
vma               227 drivers/gpu/drm/nouveau/nouveau_gem.c 	struct nouveau_vma *vma;
vma               237 drivers/gpu/drm/nouveau/nouveau_gem.c 		vma = nouveau_vma_find(nvbo, vmm);
vma               238 drivers/gpu/drm/nouveau/nouveau_gem.c 		if (!vma)
vma               241 drivers/gpu/drm/nouveau/nouveau_gem.c 		rep->offset = vma->addr;
vma               345 drivers/gpu/drm/nouveau/nouveau_gem.c 				struct nouveau_vma *vma =
vma               347 drivers/gpu/drm/nouveau/nouveau_gem.c 				nouveau_fence_unref(&vma->fence);
vma               349 drivers/gpu/drm/nouveau/nouveau_gem.c 				vma->fence = fence;
vma               441 drivers/gpu/drm/nouveau/nouveau_gem.c 			struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
vma               442 drivers/gpu/drm/nouveau/nouveau_gem.c 			if (!vma) {
vma               448 drivers/gpu/drm/nouveau/nouveau_gem.c 			b->user_priv = (uint64_t)(unsigned long)vma;
vma               783 drivers/gpu/drm/nouveau/nouveau_gem.c 			struct nouveau_vma *vma = (void *)(unsigned long)
vma               786 drivers/gpu/drm/nouveau/nouveau_gem.c 			nv50_dma_push(chan, vma->addr + push[i].offset,
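nouveau_gem.c never frees a vma while the GPU may still reference it: unmap unlinks the vma from the BO's list, then either deletes it immediately (no fence, or fence already signalled) or queues a client work item that runs nouveau_gem_object_delete() once the fence fires. A condensed sketch of that decision, assuming dma_fence_is_signaled() as the idleness test (the quoted code routes the deferred case through nouveau_cli_work_queue()):

	static void my_gem_vma_unmap(struct nouveau_bo *nvbo,
				     struct nouveau_vma *vma)
	{
		struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;

		list_del_init(&vma->head);	/* off nvbo->vma_list */

		if (!fence || dma_fence_is_signaled(fence)) {
			/* GPU provably idle: tear down now */
			nouveau_gem_object_delete(vma);
			return;
		}
		/* otherwise queue work keyed on `fence` that calls
		 * nouveau_gem_object_delete(vma) when it signals */
	}

The close path above pairs this with reference counting: only when --vma->refs reaches zero is the unmap issued at all.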
vma                37 drivers/gpu/drm/nouveau/nouveau_mem.c 		struct nvif_vmm *vmm, struct nvif_vma *vma)
vma                78 drivers/gpu/drm/nouveau/nouveau_mem.c 	ret = nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc,
vma                87 drivers/gpu/drm/nouveau/nouveau_mem.c 	nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[1]);
vma                88 drivers/gpu/drm/nouveau/nouveau_mem.c 	nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[0]);
vma                20 drivers/gpu/drm/nouveau/nouveau_mem.h 	struct nvif_vma vma[2];
vma                39 drivers/gpu/drm/nouveau/nouveau_sgdma.c 	ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]);
vma               182 drivers/gpu/drm/nouveau/nouveau_svm.c 		struct vm_area_struct *vma;
vma               185 drivers/gpu/drm/nouveau/nouveau_svm.c 		vma = find_vma_intersection(mm, addr, end);
vma               186 drivers/gpu/drm/nouveau/nouveau_svm.c 		if (!vma)
vma               189 drivers/gpu/drm/nouveau/nouveau_svm.c 		addr = max(addr, vma->vm_start);
vma               190 drivers/gpu/drm/nouveau/nouveau_svm.c 		next = min(vma->vm_end, end);
vma               192 drivers/gpu/drm/nouveau/nouveau_svm.c 		nouveau_dmem_migrate_vma(cli->drm, vma, addr, next);
vma               541 drivers/gpu/drm/nouveau/nouveau_svm.c 	struct vm_area_struct *vma;
vma               619 drivers/gpu/drm/nouveau/nouveau_svm.c 		vma = find_vma_intersection(svmm->mm, start, limit);
vma               620 drivers/gpu/drm/nouveau/nouveau_svm.c 		if (!vma) {
vma               626 drivers/gpu/drm/nouveau/nouveau_svm.c 		start = max_t(u64, start, vma->vm_start);
vma               627 drivers/gpu/drm/nouveau/nouveau_svm.c 		limit = min_t(u64, limit, vma->vm_end);
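Both nouveau_svm.c paths walk a user address range VMA by VMA: find the next vm_area_struct intersecting [addr, end), clamp the work span to it, process, advance. The canonical loop (the caller must hold the mm's mmap lock for find_vma_intersection() to be safe); process() is a placeholder for the per-range work, nouveau_dmem_migrate_vma() in the quoted code:

	#include <linux/mm.h>

	static void walk_vma_ranges(struct mm_struct *mm, unsigned long addr,
				    unsigned long end,
				    void (*process)(struct vm_area_struct *,
						    unsigned long, unsigned long))
	{
		while (addr < end) {
			struct vm_area_struct *vma;
			unsigned long next;

			vma = find_vma_intersection(mm, addr, end);
			if (!vma)
				break;				/* hole to the end */

			addr = max(addr, vma->vm_start);	/* clamp down */
			next = min(vma->vm_end, end);		/* clamp up */
			process(vma, addr, next);
			addr = next;
		}
	}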
vma               139 drivers/gpu/drm/nouveau/nouveau_ttm.c 			   reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
vma               149 drivers/gpu/drm/nouveau/nouveau_ttm.c 	reg->start = mem->vma[0].addr >> PAGE_SHIFT;
vma               162 drivers/gpu/drm/nouveau/nouveau_ttm.c nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
vma               167 drivers/gpu/drm/nouveau/nouveau_ttm.c 	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
vma                29 drivers/gpu/drm/nouveau/nouveau_vmm.c nouveau_vma_unmap(struct nouveau_vma *vma)
vma                31 drivers/gpu/drm/nouveau/nouveau_vmm.c 	if (vma->mem) {
vma                32 drivers/gpu/drm/nouveau/nouveau_vmm.c 		nvif_vmm_unmap(&vma->vmm->vmm, vma->addr);
vma                33 drivers/gpu/drm/nouveau/nouveau_vmm.c 		vma->mem = NULL;
vma                38 drivers/gpu/drm/nouveau/nouveau_vmm.c nouveau_vma_map(struct nouveau_vma *vma, struct nouveau_mem *mem)
vma                40 drivers/gpu/drm/nouveau/nouveau_vmm.c 	struct nvif_vma tmp = { .addr = vma->addr };
vma                41 drivers/gpu/drm/nouveau/nouveau_vmm.c 	int ret = nouveau_mem_map(mem, &vma->vmm->vmm, &tmp);
vma                44 drivers/gpu/drm/nouveau/nouveau_vmm.c 	vma->mem = mem;
vma                51 drivers/gpu/drm/nouveau/nouveau_vmm.c 	struct nouveau_vma *vma;
vma                53 drivers/gpu/drm/nouveau/nouveau_vmm.c 	list_for_each_entry(vma, &nvbo->vma_list, head) {
vma                54 drivers/gpu/drm/nouveau/nouveau_vmm.c 		if (vma->vmm == vmm)
vma                55 drivers/gpu/drm/nouveau/nouveau_vmm.c 			return vma;
vma                64 drivers/gpu/drm/nouveau/nouveau_vmm.c 	struct nouveau_vma *vma = *pvma;
vma                65 drivers/gpu/drm/nouveau/nouveau_vmm.c 	if (vma && --vma->refs <= 0) {
vma                66 drivers/gpu/drm/nouveau/nouveau_vmm.c 		if (likely(vma->addr != ~0ULL)) {
vma                67 drivers/gpu/drm/nouveau/nouveau_vmm.c 			struct nvif_vma tmp = { .addr = vma->addr, .size = 1 };
vma                68 drivers/gpu/drm/nouveau/nouveau_vmm.c 			nvif_vmm_put(&vma->vmm->vmm, &tmp);
vma                70 drivers/gpu/drm/nouveau/nouveau_vmm.c 		list_del(&vma->head);
vma                81 drivers/gpu/drm/nouveau/nouveau_vmm.c 	struct nouveau_vma *vma;
vma                85 drivers/gpu/drm/nouveau/nouveau_vmm.c 	if ((vma = *pvma = nouveau_vma_find(nvbo, vmm))) {
vma                86 drivers/gpu/drm/nouveau/nouveau_vmm.c 		vma->refs++;
vma                90 drivers/gpu/drm/nouveau/nouveau_vmm.c 	if (!(vma = *pvma = kmalloc(sizeof(*vma), GFP_KERNEL)))
vma                92 drivers/gpu/drm/nouveau/nouveau_vmm.c 	vma->vmm = vmm;
vma                93 drivers/gpu/drm/nouveau/nouveau_vmm.c 	vma->refs = 1;
vma                94 drivers/gpu/drm/nouveau/nouveau_vmm.c 	vma->addr = ~0ULL;
vma                95 drivers/gpu/drm/nouveau/nouveau_vmm.c 	vma->mem = NULL;
vma                96 drivers/gpu/drm/nouveau/nouveau_vmm.c 	vma->fence = NULL;
vma                97 drivers/gpu/drm/nouveau/nouveau_vmm.c 	list_add_tail(&vma->head, &nvbo->vma_list);
vma               106 drivers/gpu/drm/nouveau/nouveau_vmm.c 		vma->addr = tmp.addr;
vma               107 drivers/gpu/drm/nouveau/nouveau_vmm.c 		ret = nouveau_vma_map(vma, mem);
vma               111 drivers/gpu/drm/nouveau/nouveau_vmm.c 		vma->addr = tmp.addr;
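nouveau_vmm.c's nouveau_vma_new() is find-or-create with reference counting: an existing vma on the BO for the same vmm just gains a reference, otherwise a fresh one is initialised with addr = ~0ULL as a "no VA yet" sentinel (so nouveau_vma_del() knows whether there is anything to nvif_vmm_put()) and linked on the BO's list before address space is allocated. A skeleton of the lookup-then-create half, reusing the quoted field names:

	static int my_vma_get(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
			      struct nouveau_vma **pvma)
	{
		struct nouveau_vma *vma;

		vma = nouveau_vma_find(nvbo, vmm);
		if (vma) {
			vma->refs++;		/* share the existing mapping */
			*pvma = vma;
			return 0;
		}

		vma = kmalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma)
			return -ENOMEM;
		vma->vmm = vmm;
		vma->refs = 1;
		vma->addr = ~0ULL;		/* sentinel: no VA allocated yet */
		vma->mem = NULL;
		vma->fence = NULL;
		list_add_tail(&vma->head, &nvbo->vma_list);
		*pvma = vma;
		return 0;	/* caller now gets VA, maps, fills vma->addr */
	}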
vma               243 drivers/gpu/drm/nouveau/nv50_fbcon.c 	OUT_RING(chan, upper_32_bits(fb->vma->addr));
vma               244 drivers/gpu/drm/nouveau/nv50_fbcon.c 	OUT_RING(chan, lower_32_bits(fb->vma->addr));
vma               252 drivers/gpu/drm/nouveau/nv50_fbcon.c 	OUT_RING(chan, upper_32_bits(fb->vma->addr));
vma               253 drivers/gpu/drm/nouveau/nv50_fbcon.c 	OUT_RING(chan, lower_32_bits(fb->vma->addr));
vma                72 drivers/gpu/drm/nouveau/nv84_fence.c 	u64 addr = fctx->vma->addr + chan->chid * 16;
vma                82 drivers/gpu/drm/nouveau/nv84_fence.c 	u64 addr = fctx->vma->addr + prev->chid * 16;
vma               102 drivers/gpu/drm/nouveau/nv84_fence.c 	nouveau_vma_del(&fctx->vma);
vma               129 drivers/gpu/drm/nouveau/nv84_fence.c 	ret = nouveau_vma_new(priv->bo, chan->vmm, &fctx->vma);
vma               243 drivers/gpu/drm/nouveau/nvc0_fbcon.c 	OUT_RING  (chan, upper_32_bits(fb->vma->addr));
vma               244 drivers/gpu/drm/nouveau/nvc0_fbcon.c 	OUT_RING  (chan, lower_32_bits(fb->vma->addr));
vma               254 drivers/gpu/drm/nouveau/nvc0_fbcon.c 	OUT_RING  (chan, upper_32_bits(fb->vma->addr));
vma               255 drivers/gpu/drm/nouveau/nvc0_fbcon.c 	OUT_RING  (chan, lower_32_bits(fb->vma->addr));
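nv50_fbcon, nvc0_fbcon and nv84_fence all push 64-bit GPU virtual addresses into the ring as two 32-bit words, high half first. The split itself is just the stock kernel helpers:

	#include <linux/kernel.h>	/* upper_32_bits() / lower_32_bits() */

	/* Emit a 64-bit VA the way the OUT_RING pairs above do. */
	static inline void emit_addr64(u32 *out, u64 addr)
	{
		out[0] = upper_32_bits(addr);	/* addr >> 32 */
		out[1] = lower_32_bits(addr);	/* addr & 0xffffffff */
	}

nv84_fence additionally offsets the base VA by chid * 16, giving each channel its own 16-byte slot in the shared fence buffer.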
vma                65 drivers/gpu/drm/nouveau/nvif/vmm.c nvif_vmm_put(struct nvif_vmm *vmm, struct nvif_vma *vma)
vma                67 drivers/gpu/drm/nouveau/nvif/vmm.c 	if (vma->size) {
vma                70 drivers/gpu/drm/nouveau/nvif/vmm.c 						.addr = vma->addr,
vma                72 drivers/gpu/drm/nouveau/nvif/vmm.c 		vma->size = 0;
vma                78 drivers/gpu/drm/nouveau/nvif/vmm.c 	     u8 page, u8 align, u64 size, struct nvif_vma *vma)
vma                83 drivers/gpu/drm/nouveau/nvif/vmm.c 	args.version = vma->size = 0;
vma               101 drivers/gpu/drm/nouveau/nvif/vmm.c 		vma->addr = args.addr;
vma               102 drivers/gpu/drm/nouveau/nvif/vmm.c 		vma->size = args.size;
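At the nvif layer a struct nvif_vma is just { addr, size }, and size doubles as the validity flag: nvif_vmm_get() zeroes it before issuing the allocation and fills it only on success, while nvif_vmm_put() frees only when size is non-zero and zeroes it afterwards, so a redundant put is harmless. The idiom in isolation:

	struct my_region {
		u64 addr;
		u64 size;	/* 0 = not allocated; doubles as validity flag */
	};

	static void my_region_put(struct my_region *r)
	{
		if (!r->size)
			return;		/* never allocated, or already put */
		/* ... issue the actual release (the PUT method above) ... */
		r->size = 0;		/* make a second put a no-op */
	}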
vma                47 drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c 		     struct nvkm_vmm *vmm, struct nvkm_vma *vma,
vma                50 drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c 	return nvkm_memory_map(gpuobj->memory, offset, vmm, vma, argv, argc);
vma               109 drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c 		struct nvkm_vmm *vmm, struct nvkm_vma *vma,
vma               113 drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c 			       vmm, vma, argv, argc);
vma                17 drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h 		struct nvkm_vma *vma;
vma                21 drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h 		struct nvkm_vma *vma;
vma               114 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c 		u64 addr = chan->engn[engine->subdev.index].vma->addr;
vma               129 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c 	nvkm_vmm_put(chan->base.vmm, &chan->engn[engine->subdev.index].vma);
vma               150 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c 			   &chan->engn[engn].vma);
vma               155 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c 			       chan->engn[engn].vma, NULL, 0);
vma               133 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c 		u64   addr = chan->engn[engine->subdev.index].vma->addr;
vma               154 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c 	nvkm_vmm_put(chan->base.vmm, &chan->engn[engine->subdev.index].vma);
vma               175 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c 			   &chan->engn[engn].vma);
vma               180 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c 			       chan->engn[engn].vma, NULL, 0);
vma               100 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c 	addr = chan->engn[engine->subdev.index].vma->addr;
vma               361 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c 		nvkm_vmm_put(chan->vmm, &chan->data[i].vma);
vma               425 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c 				   &chan->data[i].vma);
vma               432 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c 				      chan->data[i].vma, &args, sizeof(args));
vma               446 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c 			u64 info = chan->data[mmio->buffer].vma->addr;
vma               266 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h 		struct nvkm_vma *vma;
vma                39 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c 	      struct nvkm_vma *vma, void *argv, u32 argc)
vma                48 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c 	return nvkm_vmm_map(vmm, vma, argv, argc, &map);
vma               282 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c 		  struct nvkm_vma *vma, void *argv, u32 argc)
vma               291 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c 	return nvkm_vmm_map(vmm, vma, argv, argc, &map);
vma               184 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c 		 struct nvkm_vma *vma, void *argv, u32 argc)
vma               187 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c 	return nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
vma                71 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 		 struct nvkm_vma *vma, void *argv, u32 argc)
vma                79 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 	return nvkm_vmm_map(vmm, vma, argv, argc, &map);
vma               111 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 		 struct nvkm_vma *vma, void *argv, u32 argc)
vma               119 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 	return nvkm_vmm_map(vmm, vma, argv, argc, &map);
vma               116 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	struct nvkm_vma *vma;
vma               126 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	vma = nvkm_vmm_node_search(vmm, addr);
vma               127 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	if (ret = -ENOENT, !vma || vma->addr != addr) {
vma               129 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 			  addr, vma ? vma->addr : ~0ULL);
vma               133 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
vma               135 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 			  vma->user, !client->super, vma->busy);
vma               139 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	if (ret = -EINVAL, !vma->memory) {
vma               144 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	nvkm_vmm_unmap_locked(vmm, vma, false);
vma               160 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	struct nvkm_vma *vma;
vma               179 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
vma               184 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
vma               186 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 			  vma->user, !client->super, vma->busy);
vma               190 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	if (ret = -EINVAL, vma->mapped && !vma->memory) {
vma               195 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	if (ret = -EINVAL, vma->addr != addr || vma->size != size) {
vma               196 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 		if (addr + size > vma->addr + vma->size || vma->memory ||
vma               197 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 		    (vma->refd == NVKM_VMA_PAGE_NONE && !vma->mapref)) {
vma               200 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 				  !!vma->memory, vma->refd, vma->mapref,
vma               201 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 				  addr, size, vma->addr, (u64)vma->size);
vma               205 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 		vma = nvkm_vmm_node_split(vmm, vma, addr, size);
vma               206 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 		if (!vma) {
vma               211 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	vma->busy = true;
vma               214 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
vma               222 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	vma->busy = false;
vma               223 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	nvkm_vmm_unmap_region(vmm, vma);
vma               238 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	struct nvkm_vma *vma;
vma               248 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	vma = nvkm_vmm_node_search(vmm, args->v0.addr);
vma               249 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {
vma               251 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 			  vma ? vma->addr : ~0ULL, vma ? vma->part : 0);
vma               255 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
vma               257 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 			  vma->user, !client->super, vma->busy);
vma               261 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	nvkm_vmm_put_locked(vmm, vma);
vma               276 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	struct nvkm_vma *vma;
vma               294 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 				  page, align, size, &vma);
vma               299 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	args->v0.addr = vma->addr;
vma               300 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	vma->user = !client->super;
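The uvmm.c ioctl handlers validate with the comma-operator idiom `if (ret = -ECODE, cond)`: the assignment selects the error that would apply, the expression after the comma decides whether the branch is taken, so each check carries its error code on one line. It is exactly equivalent to assigning ret on the previous line:

	static int my_validate(struct nvkm_vma *vma, u64 addr)
	{
		int ret;

		/* comma-operator form, as quoted above ... */
		if (ret = -ENOENT, !vma || vma->addr != addr)
			return ret;

		/* ... equivalent spelled-out form */
		ret = -EINVAL;
		if (!vma->memory)
			return ret;

		return 0;
	}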
vma               750 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
vma               751 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (vma) {
vma               752 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		vma->addr = addr;
vma               753 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		vma->size = size;
vma               754 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		vma->page = NVKM_VMA_PAGE_NONE;
vma               755 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		vma->refd = NVKM_VMA_PAGE_NONE;
vma               757 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	return vma;
vma               761 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
vma               765 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	BUG_ON(vma->size == tail);
vma               767 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail)))
vma               769 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma->size -= tail;
vma               771 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	new->mapref = vma->mapref;
vma               772 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	new->sparse = vma->sparse;
vma               773 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	new->page = vma->page;
vma               774 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	new->refd = vma->refd;
vma               775 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	new->used = vma->used;
vma               776 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	new->part = vma->part;
vma               777 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	new->user = vma->user;
vma               778 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	new->busy = vma->busy;
vma               779 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	new->mapped = vma->mapped;
vma               780 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	list_add(&new->head, &vma->head);
vma               785 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
vma               787 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	rb_erase(&vma->tree, &vmm->free);
vma               791 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
vma               793 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_vmm_free_remove(vmm, vma);
vma               794 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	list_del(&vma->head);
vma               795 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	kfree(vma);
vma               799 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
vma               807 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (vma->size < this->size)
vma               810 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (vma->size > this->size)
vma               813 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (vma->addr < this->addr)
vma               816 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (vma->addr > this->addr)
vma               822 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	rb_link_node(&vma->tree, parent, ptr);
vma               823 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	rb_insert_color(&vma->tree, &vmm->free);
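nvkm keeps two red-black trees per vmm: `free`, ordered by size then address so allocation can search best-fit, and `root` (further down), ordered by address alone for lookups. The insert above is the standard manual rb-tree descent; a self-contained version with the same (size, addr) ordering:

	#include <linux/rbtree.h>
	#include <linux/types.h>

	struct my_free_node {
		struct rb_node tree;
		u64 addr;
		u64 size;
	};

	/* Smaller sizes to the left, ties broken by address. */
	static void my_free_insert(struct rb_root *root, struct my_free_node *n)
	{
		struct rb_node **ptr = &root->rb_node;
		struct rb_node *parent = NULL;

		while (*ptr) {
			struct my_free_node *this =
				rb_entry(*ptr, typeof(*this), tree);
			parent = *ptr;
			if (n->size < this->size)
				ptr = &parent->rb_left;
			else if (n->size > this->size)
				ptr = &parent->rb_right;
			else if (n->addr < this->addr)
				ptr = &parent->rb_left;
			else
				ptr = &parent->rb_right;
		}

		rb_link_node(&n->tree, parent, ptr);
		rb_insert_color(&n->tree, root);
	}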
vma               827 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
vma               829 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	rb_erase(&vma->tree, &vmm->root);
vma               833 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
vma               835 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_vmm_node_remove(vmm, vma);
vma               836 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	list_del(&vma->head);
vma               837 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	kfree(vma);
vma               841 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
vma               849 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (vma->addr < this->addr)
vma               852 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (vma->addr > this->addr)
vma               858 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	rb_link_node(&vma->tree, parent, ptr);
vma               859 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	rb_insert_color(&vma->tree, &vmm->root);
vma               867 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
vma               868 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (addr < vma->addr)
vma               871 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (addr >= vma->addr + vma->size)
vma               874 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			return vma;
vma               884 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		    struct nvkm_vma *vma, struct nvkm_vma *next, u64 size)
vma               887 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (vma->size == size) {
vma               888 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			vma->size += next->size;
vma               891 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				prev->size += vma->size;
vma               892 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				nvkm_vmm_node_delete(vmm, vma);
vma               895 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			return vma;
vma               900 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		vma->size -= size;
vma               908 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (vma->size != size) {
vma               909 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			nvkm_vmm_node_remove(vmm, vma);
vma               911 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			vma->addr += size;
vma               912 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			vma->size -= size;
vma               913 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			nvkm_vmm_node_insert(vmm, vma);
vma               915 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			prev->size += vma->size;
vma               916 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			nvkm_vmm_node_delete(vmm, vma);
vma               921 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	return vma;
vma               926 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		    struct nvkm_vma *vma, u64 addr, u64 size)
vma               930 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (vma->addr != addr) {
vma               931 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		prev = vma;
vma               932 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr)))
vma               934 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		vma->part = true;
vma               935 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_node_insert(vmm, vma);
vma               938 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (vma->size != size) {
vma               940 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
vma               941 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size);
vma               948 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	return vma;
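nvkm_vmm_node_split() carves [addr, addr + size) out of an existing node with up to two tail splits: first if the request does not start at vma->addr (the new tail becomes the node of interest and is marked part = true), then again if it does not reach the node's end. nvkm_vma_tail(vma, tail) itself only moves `tail` bytes from the end of one node into a new one. The arithmetic in isolation:

	#include <linux/types.h>

	struct range { u64 addr, size; };

	/* Move `tail` bytes off the end of *r into *out, as
	 * nvkm_vma_tail() does for nvkm_vma nodes. */
	static void range_tail(struct range *r, u64 tail, struct range *out)
	{
		out->addr = r->addr + (r->size - tail);
		out->size = tail;
		r->size -= tail;
	}

Worked example: splitting a node [0x1000, size 0x4000) at addr 0x2000 uses tail = size + addr - 0x2000 = 0x3000, leaving the head as [0x1000, size 0x1000) and the new part-node at [0x2000, size 0x3000).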
vma               952 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vma_dump(struct nvkm_vma *vma)
vma               955 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	       vma->addr, (u64)vma->size,
vma               956 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	       vma->used ? '-' : 'F',
vma               957 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	       vma->mapref ? 'R' : '-',
vma               958 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	       vma->sparse ? 'S' : '-',
vma               959 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	       vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-',
vma               960 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	       vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-',
vma               961 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	       vma->part ? 'P' : '-',
vma               962 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	       vma->user ? 'U' : '-',
vma               963 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	       vma->busy ? 'B' : '-',
vma               964 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	       vma->mapped ? 'M' : '-',
vma               965 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	       vma->memory);
vma               971 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vma *vma;
vma               972 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	list_for_each_entry(vma, &vmm->list, head) {
vma               973 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vma_dump(vma);
vma               980 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vma *vma;
vma               987 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
vma               988 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_put(vmm, &vma);
vma              1002 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma = list_first_entry(&vmm->list, typeof(*vma), head);
vma              1003 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	list_del(&vma->head);
vma              1004 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	kfree(vma);
vma              1021 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vma *vma;
vma              1022 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (!(vma = nvkm_vma_new(addr, size)))
vma              1024 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma->mapref = true;
vma              1025 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma->sparse = false;
vma              1026 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma->used = true;
vma              1027 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma->user = true;
vma              1028 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_vmm_node_insert(vmm, vma);
vma              1029 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	list_add_tail(&vma->head, &vmm->list);
vma              1042 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vma *vma;
vma              1109 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			if (!(vma = nvkm_vma_new(addr, size)))
vma              1111 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			nvkm_vmm_free_insert(vmm, vma);
vma              1112 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			list_add_tail(&vma->head, &vmm->list);
vma              1129 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start)))
vma              1132 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_free_insert(vmm, vma);
vma              1133 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		list_add(&vma->head, &vmm->list);
vma              1151 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
vma              1157 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (vma->addr == addr && vma->part && (prev = node(vma, prev))) {
vma              1162 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (vma->addr + vma->size == addr + size && (next = node(vma, next))) {
vma              1169 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		return nvkm_vmm_node_merge(vmm, prev, vma, next, size);
vma              1170 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	return nvkm_vmm_node_split(vmm, vma, addr, size);
vma              1176 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vma *vma = nvkm_vmm_node_search(vmm, addr);
vma              1181 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (!vma)
vma              1185 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (!vma->mapped || vma->memory)
vma              1188 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		size = min(limit - start, vma->size - (start - vma->addr));
vma              1190 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_ptes_unmap_put(vmm, &vmm->func->page[vma->refd],
vma              1193 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		next = nvkm_vmm_pfn_split_merge(vmm, vma, start, size, 0, false);
vma              1195 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			vma = next;
vma              1196 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			vma->refd = NVKM_VMA_PAGE_NONE;
vma              1197 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			vma->mapped = false;
vma              1199 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	} while ((vma = node(vma, next)) && (start = vma->addr) < limit);
vma              1214 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vma *vma, *tmp;
vma              1235 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (!(vma = nvkm_vmm_node_search(vmm, addr)))
vma              1240 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		bool mapped = vma->mapped;
vma              1253 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		size = min_t(u64, size, vma->size + vma->addr - addr);
vma              1258 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (!vma->mapref || vma->memory) {
vma              1275 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			tmp = nvkm_vmm_pfn_split_merge(vmm, vma, addr, size,
vma              1287 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			vma = tmp;
vma              1313 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (vma->addr + vma->size == addr + size)
vma              1314 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			vma = node(vma, next);
vma              1328 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	} while (vma && start < limit);
vma              1334 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
vma              1339 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
vma              1340 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_memory_unref(&vma->memory);
vma              1341 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma->mapped = false;
vma              1343 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (vma->part && (prev = node(vma, prev)) && prev->mapped)
vma              1345 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if ((next = node(vma, next)) && (!next->part || next->mapped))
vma              1347 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
vma              1351 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn)
vma              1353 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd];
vma              1355 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (vma->mapref) {
vma              1356 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
vma              1357 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		vma->refd = NVKM_VMA_PAGE_NONE;
vma              1359 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
vma              1362 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_vmm_unmap_region(vmm, vma);
vma              1366 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
vma              1368 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (vma->memory) {
vma              1370 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_unmap_locked(vmm, vma, false);
vma              1376 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_map_valid(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
vma              1398 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (!IS_ALIGNED(     vma->addr, 1ULL << map->page->shift) ||
vma              1399 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	    !IS_ALIGNED((u64)vma->size, 1ULL << map->page->shift) ||
vma              1403 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		    vma->addr, (u64)vma->size, map->offset, map->page->shift,
vma              1412 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_map_choose(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
vma              1417 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (!nvkm_vmm_map_valid(vmm, vma, argv, argc, map))
vma              1424 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
vma              1431 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) {
vma              1434 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			  map->offset, (u64)vma->size);
vma              1439 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (vma->page == NVKM_VMA_PAGE_NONE &&
vma              1440 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	    vma->refd == NVKM_VMA_PAGE_NONE) {
vma              1444 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		ret = nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
vma              1448 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
vma              1453 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (vma->refd != NVKM_VMA_PAGE_NONE)
vma              1454 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			map->page = &vmm->func->page[vma->refd];
vma              1456 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			map->page = &vmm->func->page[vma->page];
vma              1458 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		ret = nvkm_vmm_map_valid(vmm, vma, argv, argc, map);
vma              1491 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (vma->refd == NVKM_VMA_PAGE_NONE) {
vma              1492 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func);
vma              1496 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		vma->refd = map->page - vmm->func->page;
vma              1498 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func);
vma              1501 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
vma              1502 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_memory_unref(&vma->memory);
vma              1503 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma->memory = nvkm_memory_ref(map->memory);
vma              1504 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma->mapped = true;
vma              1505 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma->tags = map->tags;
vma              1510 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc,
vma              1515 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
vma              1516 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma->busy = false;
vma              1522 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
vma              1526 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if ((prev = node(vma, prev)) && !prev->used) {
vma              1527 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		vma->addr  = prev->addr;
vma              1528 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		vma->size += prev->size;
vma              1532 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if ((next = node(vma, next)) && !next->used) {
vma              1533 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		vma->size += next->size;
vma              1537 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_vmm_free_insert(vmm, vma);
vma              1541 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
vma              1544 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vma *next = vma;
vma              1546 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	BUG_ON(vma->part);
vma              1548 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (vma->mapref || !vma->sparse) {
vma              1569 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 							size, vma->sparse,
vma              1583 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	next = vma;
vma              1587 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	} while ((next = node(vma, next)) && next->part);
vma              1589 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (vma->sparse && !vma->mapref) {
vma              1598 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_ptes_sparse_put(vmm, &page[vma->refd], vma->addr, vma->size);
vma              1600 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (vma->sparse) {
vma              1609 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, false);
vma              1613 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_vmm_node_remove(vmm, vma);
vma              1616 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma->page = NVKM_VMA_PAGE_NONE;
vma              1617 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma->refd = NVKM_VMA_PAGE_NONE;
vma              1618 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma->used = false;
vma              1619 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma->user = false;
vma              1620 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_vmm_put_region(vmm, vma);
vma              1626 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vma *vma = *pvma;
vma              1627 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (vma) {
vma              1629 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_put_locked(vmm, vma);
vma              1641 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vma *vma = NULL, *tmp;
vma              1721 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			vma = this;
vma              1726 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (unlikely(!vma))
vma              1732 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (addr != vma->addr) {
vma              1733 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (!(tmp = nvkm_vma_tail(vma, vma->size + vma->addr - addr))) {
vma              1734 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			nvkm_vmm_put_region(vmm, vma);
vma              1737 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_free_insert(vmm, vma);
vma              1738 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		vma = tmp;
vma              1741 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (size != vma->size) {
vma              1742 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
vma              1743 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			nvkm_vmm_put_region(vmm, vma);
vma              1751 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		ret = nvkm_vmm_ptes_sparse_get(vmm, page, vma->addr, vma->size);
vma              1753 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		ret = nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, true);
vma              1755 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		ret = nvkm_vmm_ptes_get(vmm, page, vma->addr, vma->size);
vma              1759 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_vmm_put_region(vmm, vma);
vma              1763 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma->mapref = mapref && !getref;
vma              1764 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma->sparse = sparse;
vma              1765 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma->page = page - vmm->func->page;
vma              1766 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma->refd = getref ? vma->page : NVKM_VMA_PAGE_NONE;
vma              1767 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vma->used = true;
vma              1768 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_vmm_node_insert(vmm, vma);
vma              1769 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	*pvma = vma;
vma                42 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c 	struct nvkm_vma *vma = NULL;
vma                51 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c 	ret = nvkm_vmm_get(gsb->vmm, 12, blob->size, &vma);
vma                57 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c 	ret = nvkm_memory_map(blob, 0, gsb->vmm, vma, NULL, 0);
vma                68 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c 	ret = sb->acr->func->load(sb->acr, falcon, blob, vma->addr);
vma                98 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c 	nvkm_vmm_put(gsb->vmm, &vma);
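The secboot gm200.c lines show the whole nvkm-side lifecycle in one place: nvkm_vmm_get() a region with 4KiB pages (shift 12) sized for the blob, nvkm_memory_map() the blob into it, hand vma->addr to the loader, and nvkm_vmm_put() when finished (which also NULLs the pointer, hence the &vma). A sketch of the bracket; nvkm_memory_size() in place of the quoted blob->size is an assumption:

	static int my_run_blob(struct nvkm_vmm *vmm, struct nvkm_memory *blob,
			       int (*use)(u64 addr))
	{
		struct nvkm_vma *vma = NULL;
		int ret;

		ret = nvkm_vmm_get(vmm, 12, nvkm_memory_size(blob), &vma);
		if (ret)
			return ret;

		ret = nvkm_memory_map(blob, 0, vmm, vma, NULL, 0);
		if (ret)
			goto put;

		ret = use(vma->addr);	/* e.g. the ACR load above */
	put:
		nvkm_vmm_put(vmm, &vma);	/* also sets vma = NULL */
		return ret;
	}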
vma               347 drivers/gpu/drm/omapdrm/omap_gem.c 		struct vm_area_struct *vma, struct vm_fault *vmf)
vma               354 drivers/gpu/drm/omapdrm/omap_gem.c 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
vma               367 drivers/gpu/drm/omapdrm/omap_gem.c 	return vmf_insert_mixed(vma, vmf->address,
vma               373 drivers/gpu/drm/omapdrm/omap_gem.c 		struct vm_area_struct *vma, struct vm_fault *vmf)
vma               403 drivers/gpu/drm/omapdrm/omap_gem.c 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
vma               463 drivers/gpu/drm/omapdrm/omap_gem.c 		ret = vmf_insert_mixed(vma,
vma               492 drivers/gpu/drm/omapdrm/omap_gem.c 	struct vm_area_struct *vma = vmf->vma;
vma               493 drivers/gpu/drm/omapdrm/omap_gem.c 	struct drm_gem_object *obj = vma->vm_private_data;
vma               517 drivers/gpu/drm/omapdrm/omap_gem.c 		ret = omap_gem_fault_2d(obj, vma, vmf);
vma               519 drivers/gpu/drm/omapdrm/omap_gem.c 		ret = omap_gem_fault_1d(obj, vma, vmf);
vma               528 drivers/gpu/drm/omapdrm/omap_gem.c int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
vma               532 drivers/gpu/drm/omapdrm/omap_gem.c 	ret = drm_gem_mmap(filp, vma);
vma               538 drivers/gpu/drm/omapdrm/omap_gem.c 	return omap_gem_mmap_obj(vma->vm_private_data, vma);
vma               542 drivers/gpu/drm/omapdrm/omap_gem.c 		struct vm_area_struct *vma)
vma               546 drivers/gpu/drm/omapdrm/omap_gem.c 	vma->vm_flags &= ~VM_PFNMAP;
vma               547 drivers/gpu/drm/omapdrm/omap_gem.c 	vma->vm_flags |= VM_MIXEDMAP;
vma               550 drivers/gpu/drm/omapdrm/omap_gem.c 		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma               552 drivers/gpu/drm/omapdrm/omap_gem.c 		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
vma               567 drivers/gpu/drm/omapdrm/omap_gem.c 		fput(vma->vm_file);
vma               568 drivers/gpu/drm/omapdrm/omap_gem.c 		vma->vm_pgoff = 0;
vma               569 drivers/gpu/drm/omapdrm/omap_gem.c 		vma->vm_file  = get_file(obj->filp);
vma               571 drivers/gpu/drm/omapdrm/omap_gem.c 		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma                61 drivers/gpu/drm/omapdrm/omap_gem.h int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
vma                63 drivers/gpu/drm/omapdrm/omap_gem.h 		struct vm_area_struct *vma);
vma               108 drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c 		struct vm_area_struct *vma)
vma               113 drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c 	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
vma               117 drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c 	return omap_gem_mmap_obj(obj, vma);
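omap_gem_mmap_obj() above has a notable branch for shmem-backed objects: the mapping is re-pointed at the GEM object's own shmem file so page faults land in the right address_space, which requires dropping the reference on the DRM device file and resetting vm_pgoff (offsets become object-relative). The three steps in isolation:

	#include <linux/file.h>
	#include <drm/drm_gem.h>

	static void my_redirect_to_shmem(struct vm_area_struct *vma,
					 struct drm_gem_object *obj)
	{
		fput(vma->vm_file);			/* drop the DRM device file */
		vma->vm_pgoff = 0;			/* offsets now object-relative */
		vma->vm_file  = get_file(obj->filp);	/* ref the shmem file */
	}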
vma               357 drivers/gpu/drm/qxl/qxl_drv.h int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
vma               456 drivers/gpu/drm/qxl/qxl_drv.h 				struct vm_area_struct *vma);
vma                59 drivers/gpu/drm/qxl/qxl_ttm.c 	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
vma                66 drivers/gpu/drm/qxl/qxl_ttm.c int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
vma                78 drivers/gpu/drm/qxl/qxl_ttm.c 		  filp->private_data, vma->vm_pgoff);
vma                80 drivers/gpu/drm/qxl/qxl_ttm.c 	r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
vma                84 drivers/gpu/drm/qxl/qxl_ttm.c 		ttm_vm_ops = vma->vm_ops;
vma                88 drivers/gpu/drm/qxl/qxl_ttm.c 	vma->vm_ops = &qxl_ttm_vm_ops;
vma               144 drivers/gpu/drm/radeon/radeon_drv.c int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
vma               503 drivers/gpu/drm/radeon/radeon_ttm.c 		struct vm_area_struct *vma;
vma               504 drivers/gpu/drm/radeon/radeon_ttm.c 		vma = find_vma(gtt->usermm, gtt->userptr);
vma               505 drivers/gpu/drm/radeon/radeon_ttm.c 		if (!vma || vma->vm_file || vma->vm_end < end)
vma               892 drivers/gpu/drm/radeon/radeon_ttm.c 	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
vma               903 drivers/gpu/drm/radeon/radeon_ttm.c int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
vma               912 drivers/gpu/drm/radeon/radeon_ttm.c 	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
vma               917 drivers/gpu/drm/radeon/radeon_ttm.c 		ttm_vm_ops = vma->vm_ops;
vma               921 drivers/gpu/drm/radeon/radeon_ttm.c 	vma->vm_ops = &radeon_ttm_vm_ops;
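qxl and radeon wrap TTM's fault handling rather than replacing it: ttm_bo_mmap() installs TTM's vm_ops, the driver saves that pointer once, then substitutes its own table whose .fault can do driver work (such as waiting for the device) before chaining to the saved TTM handler. A sketch of the wrap, with my_ops standing in for qxl_ttm_vm_ops/radeon_ttm_vm_ops:

	static const struct vm_operations_struct *saved_ttm_vm_ops;

	static int my_wrapped_mmap(struct file *filp, struct vm_area_struct *vma,
				   struct ttm_bo_device *bdev,
				   const struct vm_operations_struct *my_ops)
	{
		int r = ttm_bo_mmap(filp, vma, bdev);

		if (unlikely(r != 0))
			return r;
		if (unlikely(saved_ttm_vm_ops == NULL))
			saved_ttm_vm_ops = vma->vm_ops;	/* keep for chaining */
		vma->vm_ops = my_ops;
		return 0;
	}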
vma                22 drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c 			       struct vm_area_struct *vma)
vma                27 drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c 	return rockchip_gem_mmap_buf(private->fbdev_bo, vma);
vma               213 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 					      struct vm_area_struct *vma)
vma               217 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 	unsigned long user_count = vma_pages(vma);
vma               222 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 	return vm_map_pages(vma, rk_obj->pages, count);
vma               226 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 					    struct vm_area_struct *vma)
vma               231 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 	return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
vma               236 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 					struct vm_area_struct *vma)
vma               245 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 	vma->vm_flags &= ~VM_PFNMAP;
vma               248 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
vma               250 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);
vma               253 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 		drm_gem_vm_close(vma);
vma               259 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 			  struct vm_area_struct *vma)
vma               263 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 	ret = drm_gem_mmap_obj(obj, obj->size, vma);
vma               267 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 	return rockchip_drm_gem_object_mmap(obj, vma);
vma               271 drivers/gpu/drm/rockchip/rockchip_drm_gem.c int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
vma               276 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 	ret = drm_gem_mmap(filp, vma);
vma               284 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 	vma->vm_pgoff = 0;
vma               286 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 	obj = vma->vm_private_data;
vma               288 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 	return rockchip_drm_gem_object_mmap(obj, vma);
vma                38 drivers/gpu/drm/rockchip/rockchip_drm_gem.h int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma);
vma                42 drivers/gpu/drm/rockchip/rockchip_drm_gem.h 			  struct vm_area_struct *vma);
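The rockchip helpers illustrate a recurring GEM/DMA-API mismatch: drm_gem_mmap() leaves the fake object-lookup offset in vm_pgoff, but dma_mmap_attrs() would interpret it as an offset into the buffer, so it must be zeroed before the hand-off; VM_PFNMAP is cleared because struct pages are inserted rather than a raw PFN range. A sketch, with DMA_ATTR_WRITE_COMBINE as an assumed attribute (the quoted code passes the object's stored dma_attrs):

	#include <linux/dma-mapping.h>
	#include <linux/mm.h>

	static int my_gem_mmap_dma(struct device *dev, struct vm_area_struct *vma,
				   void *kvaddr, dma_addr_t dma_addr, size_t size)
	{
		vma->vm_flags &= ~VM_PFNMAP;	/* pages, not raw PFNs */
		vma->vm_pgoff = 0;		/* drop the GEM lookup offset */

		return dma_mmap_attrs(dev, vma, kvaddr, dma_addr, size,
				      DMA_ATTR_WRITE_COMBINE);
	}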
vma               180 drivers/gpu/drm/tegra/fb.c static int tegra_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma               188 drivers/gpu/drm/tegra/fb.c 	err = drm_gem_mmap_obj(&bo->gem, bo->gem.size, vma);
vma               192 drivers/gpu/drm/tegra/fb.c 	return __tegra_gem_mmap(&bo->gem, vma);
vma               427 drivers/gpu/drm/tegra/gem.c 	struct vm_area_struct *vma = vmf->vma;
vma               428 drivers/gpu/drm/tegra/gem.c 	struct drm_gem_object *gem = vma->vm_private_data;
vma               436 drivers/gpu/drm/tegra/gem.c 	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
vma               439 drivers/gpu/drm/tegra/gem.c 	return vmf_insert_page(vma, vmf->address, page);
vma               448 drivers/gpu/drm/tegra/gem.c int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
vma               453 drivers/gpu/drm/tegra/gem.c 		unsigned long vm_pgoff = vma->vm_pgoff;
vma               461 drivers/gpu/drm/tegra/gem.c 		vma->vm_flags &= ~VM_PFNMAP;
vma               462 drivers/gpu/drm/tegra/gem.c 		vma->vm_pgoff = 0;
vma               464 drivers/gpu/drm/tegra/gem.c 		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
vma               467 drivers/gpu/drm/tegra/gem.c 			drm_gem_vm_close(vma);
vma               471 drivers/gpu/drm/tegra/gem.c 		vma->vm_pgoff = vm_pgoff;
vma               473 drivers/gpu/drm/tegra/gem.c 		pgprot_t prot = vm_get_page_prot(vma->vm_flags);
vma               475 drivers/gpu/drm/tegra/gem.c 		vma->vm_flags |= VM_MIXEDMAP;
vma               476 drivers/gpu/drm/tegra/gem.c 		vma->vm_flags &= ~VM_PFNMAP;
vma               478 drivers/gpu/drm/tegra/gem.c 		vma->vm_page_prot = pgprot_writecombine(prot);
vma               484 drivers/gpu/drm/tegra/gem.c int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
vma               489 drivers/gpu/drm/tegra/gem.c 	err = drm_gem_mmap(file, vma);
vma               493 drivers/gpu/drm/tegra/gem.c 	gem = vma->vm_private_data;
vma               495 drivers/gpu/drm/tegra/gem.c 	return __tegra_gem_mmap(gem, vma);
vma               595 drivers/gpu/drm/tegra/gem.c static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
vma               600 drivers/gpu/drm/tegra/gem.c 	err = drm_gem_mmap_obj(gem, gem->size, vma);
vma               604 drivers/gpu/drm/tegra/gem.c 	return __tegra_gem_mmap(gem, vma);
vma                69 drivers/gpu/drm/tegra/gem.h int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma);
vma                70 drivers/gpu/drm/tegra/gem.h int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
vma                72 drivers/gpu/drm/ttm/ttm_bo_vm.c 		up_read(&vmf->vma->vm_mm->mmap_sem);
vma               111 drivers/gpu/drm/ttm/ttm_bo_vm.c 	struct vm_area_struct *vma = vmf->vma;
vma               113 drivers/gpu/drm/ttm/ttm_bo_vm.c 	    vma->vm_private_data;
vma               138 drivers/gpu/drm/ttm/ttm_bo_vm.c 				up_read(&vmf->vma->vm_mm->mmap_sem);
vma               213 drivers/gpu/drm/ttm/ttm_bo_vm.c 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
vma               214 drivers/gpu/drm/ttm/ttm_bo_vm.c 		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
vma               215 drivers/gpu/drm/ttm/ttm_bo_vm.c 	page_last = vma_pages(vma) + vma->vm_pgoff -
vma               228 drivers/gpu/drm/ttm/ttm_bo_vm.c 	cvma = *vma;
vma               275 drivers/gpu/drm/ttm/ttm_bo_vm.c 		if (vma->vm_flags & VM_MIXEDMAP)
vma               301 drivers/gpu/drm/ttm/ttm_bo_vm.c static void ttm_bo_vm_open(struct vm_area_struct *vma)
vma               304 drivers/gpu/drm/ttm/ttm_bo_vm.c 	    (struct ttm_buffer_object *)vma->vm_private_data;
vma               306 drivers/gpu/drm/ttm/ttm_bo_vm.c 	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
vma               311 drivers/gpu/drm/ttm/ttm_bo_vm.c static void ttm_bo_vm_close(struct vm_area_struct *vma)
vma               313 drivers/gpu/drm/ttm/ttm_bo_vm.c 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
vma               316 drivers/gpu/drm/ttm/ttm_bo_vm.c 	vma->vm_private_data = NULL;
vma               358 drivers/gpu/drm/ttm/ttm_bo_vm.c static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
vma               361 drivers/gpu/drm/ttm/ttm_bo_vm.c 	unsigned long offset = (addr) - vma->vm_start;
vma               362 drivers/gpu/drm/ttm/ttm_bo_vm.c 	struct ttm_buffer_object *bo = vma->vm_private_data;
vma               427 drivers/gpu/drm/ttm/ttm_bo_vm.c int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
vma               434 drivers/gpu/drm/ttm/ttm_bo_vm.c 	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
vma               437 drivers/gpu/drm/ttm/ttm_bo_vm.c 	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
vma               450 drivers/gpu/drm/ttm/ttm_bo_vm.c 	vma->vm_ops = &ttm_bo_vm_ops;
vma               457 drivers/gpu/drm/ttm/ttm_bo_vm.c 	vma->vm_private_data = bo;
vma               466 drivers/gpu/drm/ttm/ttm_bo_vm.c 	vma->vm_flags |= VM_MIXEDMAP;
vma               467 drivers/gpu/drm/ttm/ttm_bo_vm.c 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma               475 drivers/gpu/drm/ttm/ttm_bo_vm.c int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
vma               477 drivers/gpu/drm/ttm/ttm_bo_vm.c 	if (vma->vm_pgoff != 0)
vma               482 drivers/gpu/drm/ttm/ttm_bo_vm.c 	vma->vm_ops = &ttm_bo_vm_ops;
vma               483 drivers/gpu/drm/ttm/ttm_bo_vm.c 	vma->vm_private_data = bo;
vma               484 drivers/gpu/drm/ttm/ttm_bo_vm.c 	vma->vm_flags |= VM_MIXEDMAP;
vma               485 drivers/gpu/drm/ttm/ttm_bo_vm.c 	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
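
The ttm_bo_vm.c lines above show the common fault-driven mmap wiring: stash the object in vm_private_data, install vm_ops, and mark the VMA VM_MIXEDMAP so individual pages can be inserted at fault time. A minimal sketch of that pattern follows; struct my_bo, my_bo_fault() and my_bo_mmap() are illustrative names, not TTM API.

#include <linux/mm.h>

/* Hypothetical buffer object; only the fields this sketch needs. */
struct my_bo {
	struct page **pages;
	unsigned long num_pages;
};

static vm_fault_t my_bo_fault(struct vm_fault *vmf)
{
	struct my_bo *bo = vmf->vma->vm_private_data;
	pgoff_t idx = (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;

	if (idx >= bo->num_pages)
		return VM_FAULT_SIGBUS;
	/* Insert exactly the faulting page; VM_MIXEDMAP allows this. */
	return vmf_insert_page(vmf->vma, vmf->address, bo->pages[idx]);
}

static const struct vm_operations_struct my_bo_vm_ops = {
	.fault = my_bo_fault,
};

static int my_bo_mmap(struct my_bo *bo, struct vm_area_struct *vma)
{
	vma->vm_ops = &my_bo_vm_ops;
	vma->vm_private_data = bo;
	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}

Populating lazily from .fault (rather than remapping everything in mmap) is what lets TTM move buffers between domains and simply zap/refault the mapping afterwards.
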
vma               157 drivers/gpu/drm/udl/udl_dmabuf.c 			   struct vm_area_struct *vma)
vma               143 drivers/gpu/drm/udl/udl_drv.h int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
vma               160 drivers/gpu/drm/udl/udl_fb.c static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma               162 drivers/gpu/drm/udl/udl_fb.c 	unsigned long start = vma->vm_start;
vma               163 drivers/gpu/drm/udl/udl_fb.c 	unsigned long size = vma->vm_end - vma->vm_start;
vma               167 drivers/gpu/drm/udl/udl_fb.c 	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
vma               170 drivers/gpu/drm/udl/udl_fb.c 	offset = vma->vm_pgoff << PAGE_SHIFT;
vma               181 drivers/gpu/drm/udl/udl_fb.c 	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
vma               185 drivers/gpu/drm/udl/udl_fb.c 		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
vma                61 drivers/gpu/drm/udl/udl_gem.c 				 struct vm_area_struct *vma)
vma                67 drivers/gpu/drm/udl/udl_gem.c 		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma                69 drivers/gpu/drm/udl/udl_gem.c 		vma->vm_page_prot =
vma                70 drivers/gpu/drm/udl/udl_gem.c 			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma                72 drivers/gpu/drm/udl/udl_gem.c 		vma->vm_page_prot =
vma                73 drivers/gpu/drm/udl/udl_gem.c 			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
vma                87 drivers/gpu/drm/udl/udl_gem.c int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
vma                91 drivers/gpu/drm/udl/udl_gem.c 	ret = drm_gem_mmap(filp, vma);
vma                95 drivers/gpu/drm/udl/udl_gem.c 	vma->vm_flags &= ~VM_PFNMAP;
vma                96 drivers/gpu/drm/udl/udl_gem.c 	vma->vm_flags |= VM_MIXEDMAP;
vma                98 drivers/gpu/drm/udl/udl_gem.c 	update_vm_cache_attr(to_udl_bo(vma->vm_private_data), vma);
vma               105 drivers/gpu/drm/udl/udl_gem.c 	struct vm_area_struct *vma = vmf->vma;
vma               106 drivers/gpu/drm/udl/udl_gem.c 	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
vma               110 drivers/gpu/drm/udl/udl_gem.c 	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
vma               116 drivers/gpu/drm/udl/udl_gem.c 	return vmf_insert_page(vma, vmf->address, page);
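
udl's update_vm_cache_attr() lines above pick the VMA page protection from a per-object cache level. A sketch of that selection, assuming a hypothetical per-object cache-mode enum (the three cases mirror the cached / write-combined / uncached branches shown):

#include <linux/mm.h>

enum my_cache_mode { MY_CACHE_CACHED, MY_CACHE_WC, MY_CACHE_UNCACHED };

static void my_set_vm_cache(struct vm_area_struct *vma, enum my_cache_mode m)
{
	switch (m) {
	case MY_CACHE_CACHED:
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		break;
	case MY_CACHE_WC:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		break;
	default:
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
		break;
	}
}
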
vma               689 drivers/gpu/drm/vc4/vc4_bo.c 	struct vm_area_struct *vma = vmf->vma;
vma               690 drivers/gpu/drm/vc4/vc4_bo.c 	struct drm_gem_object *obj = vma->vm_private_data;
vma               703 drivers/gpu/drm/vc4/vc4_bo.c int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
vma               710 drivers/gpu/drm/vc4/vc4_bo.c 	ret = drm_gem_mmap(filp, vma);
vma               714 drivers/gpu/drm/vc4/vc4_bo.c 	gem_obj = vma->vm_private_data;
vma               717 drivers/gpu/drm/vc4/vc4_bo.c 	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
vma               734 drivers/gpu/drm/vc4/vc4_bo.c 	vma->vm_flags &= ~VM_PFNMAP;
vma               746 drivers/gpu/drm/vc4/vc4_bo.c 	vm_pgoff = vma->vm_pgoff;
vma               747 drivers/gpu/drm/vc4/vc4_bo.c 	vma->vm_pgoff = 0;
vma               748 drivers/gpu/drm/vc4/vc4_bo.c 	ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
vma               749 drivers/gpu/drm/vc4/vc4_bo.c 			  bo->base.paddr, vma->vm_end - vma->vm_start);
vma               750 drivers/gpu/drm/vc4/vc4_bo.c 	vma->vm_pgoff = vm_pgoff;
vma               753 drivers/gpu/drm/vc4/vc4_bo.c 		drm_gem_vm_close(vma);
vma               758 drivers/gpu/drm/vc4/vc4_bo.c int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
vma               762 drivers/gpu/drm/vc4/vc4_bo.c 	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
vma               767 drivers/gpu/drm/vc4/vc4_bo.c 	return drm_gem_cma_prime_mmap(obj, vma);
vma               731 drivers/gpu/drm/vc4/vc4_drv.h int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
vma               732 drivers/gpu/drm/vc4/vc4_drv.h int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
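
In vc4_mmap() above, vm_pgoff still carries the DRM fake-offset cookie when the DMA API is reached, but dma_mmap_wc() interprets vm_pgoff as an offset into the buffer itself, so the driver zeroes it around the call and restores it afterwards. A sketch of that dance; dev, vaddr and dma_handle stand in for the driver's CMA allocation:

#include <linux/dma-mapping.h>

static int my_cma_mmap(struct device *dev, struct vm_area_struct *vma,
		       void *vaddr, dma_addr_t dma_handle)
{
	unsigned long vm_pgoff = vma->vm_pgoff;
	int ret;

	vma->vm_pgoff = 0;	/* hide the GEM fake offset from the DMA API */
	ret = dma_mmap_wc(dev, vma, vaddr, dma_handle,
			  vma->vm_end - vma->vm_start);
	vma->vm_pgoff = vm_pgoff;

	return ret;
}
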
vma                73 drivers/gpu/drm/vgem/vgem_drv.c 	struct vm_area_struct *vma = vmf->vma;
vma                74 drivers/gpu/drm/vgem/vgem_drv.c 	struct drm_vgem_gem_object *obj = vma->vm_private_data;
vma                80 drivers/gpu/drm/vgem/vgem_drv.c 	page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
vma               263 drivers/gpu/drm/vgem/vgem_drv.c static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
vma               265 drivers/gpu/drm/vgem/vgem_drv.c 	unsigned long flags = vma->vm_flags;
vma               268 drivers/gpu/drm/vgem/vgem_drv.c 	ret = drm_gem_mmap(filp, vma);
vma               275 drivers/gpu/drm/vgem/vgem_drv.c 	vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
vma               407 drivers/gpu/drm/vgem/vgem_drv.c 			   struct vm_area_struct *vma)
vma               411 drivers/gpu/drm/vgem/vgem_drv.c 	if (obj->size < vma->vm_end - vma->vm_start)
vma               417 drivers/gpu/drm/vgem/vgem_drv.c 	ret = call_mmap(obj->filp, vma);
vma               421 drivers/gpu/drm/vgem/vgem_drv.c 	fput(vma->vm_file);
vma               422 drivers/gpu/drm/vgem/vgem_drv.c 	vma->vm_file = get_file(obj->filp);
vma               423 drivers/gpu/drm/vgem/vgem_drv.c 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma               424 drivers/gpu/drm/vgem/vgem_drv.c 	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
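
The vgem prime-mmap lines above show the vm_file swap: after delegating to the backing shmem file's mmap, the VMA's file reference is retargeted at that backing file so later unmap accounting and writeback see the right inode, not the DRM device node. A sketch, assuming 'backing' is the object's shmem file (obj->filp above):

#include <linux/fs.h>
#include <linux/mm.h>

static int my_prime_mmap(struct file *backing, struct vm_area_struct *vma)
{
	int ret;

	ret = call_mmap(backing, vma);	/* let shmem install its vm_ops */
	if (ret)
		return ret;

	/* Swap the device file for the file actually backing the pages. */
	fput(vma->vm_file);
	vma->vm_file = get_file(backing);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	return 0;
}
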
vma               345 drivers/gpu/drm/virtio/virtgpu_drv.h int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
vma               377 drivers/gpu/drm/virtio/virtgpu_drv.h 			   struct vm_area_struct *vma);
vma                69 drivers/gpu/drm/virtio/virtgpu_prime.c 			   struct vm_area_struct *vma)
vma                71 drivers/gpu/drm/virtio/virtgpu_prime.c 	return drm_gem_prime_mmap(obj, vma);
vma                52 drivers/gpu/drm/virtio/virtgpu_ttm.c int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
vma                65 drivers/gpu/drm/virtio/virtgpu_ttm.c 	r = ttm_bo_mmap(filp, vma, &vgdev->mman.bdev);
vma                45 drivers/gpu/drm/vkms/vkms_gem.c 	struct vm_area_struct *vma = vmf->vma;
vma                46 drivers/gpu/drm/vkms/vkms_gem.c 	struct vkms_gem_object *obj = vma->vm_private_data;
vma                52 drivers/gpu/drm/vkms/vkms_gem.c 	page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
vma               913 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
vma                87 drivers/gpu/drm/vmwgfx/vmwgfx_prime.c 				 struct vm_area_struct *vma)
vma                30 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
vma                35 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c 	return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
vma               224 drivers/gpu/drm/xen/xen_drm_front_gem.c 			struct vm_area_struct *vma)
vma               233 drivers/gpu/drm/xen/xen_drm_front_gem.c 	vma->vm_flags &= ~VM_PFNMAP;
vma               234 drivers/gpu/drm/xen/xen_drm_front_gem.c 	vma->vm_flags |= VM_MIXEDMAP;
vma               235 drivers/gpu/drm/xen/xen_drm_front_gem.c 	vma->vm_pgoff = 0;
vma               243 drivers/gpu/drm/xen/xen_drm_front_gem.c 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma               253 drivers/gpu/drm/xen/xen_drm_front_gem.c 	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
vma               260 drivers/gpu/drm/xen/xen_drm_front_gem.c int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
vma               266 drivers/gpu/drm/xen/xen_drm_front_gem.c 	ret = drm_gem_mmap(filp, vma);
vma               270 drivers/gpu/drm/xen/xen_drm_front_gem.c 	gem_obj = vma->vm_private_data;
vma               272 drivers/gpu/drm/xen/xen_drm_front_gem.c 	return gem_mmap_obj(xen_obj, vma);
vma               294 drivers/gpu/drm/xen/xen_drm_front_gem.c 				 struct vm_area_struct *vma)
vma               299 drivers/gpu/drm/xen/xen_drm_front_gem.c 	ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
vma               304 drivers/gpu/drm/xen/xen_drm_front_gem.c 	return gem_mmap_obj(xen_obj, vma);
vma                35 drivers/gpu/drm/xen/xen_drm_front_gem.h int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma);
vma                43 drivers/gpu/drm/xen/xen_drm_front_gem.h 				 struct vm_area_struct *vma);
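
xen_drm_front's gem_mmap_obj() above relies on vm_map_pages(), which maps a whole page array in one call and range-checks the VMA size against it. A minimal sketch of that usage:

#include <linux/mm.h>

static int my_mmap_page_array(struct vm_area_struct *vma,
			      struct page **pages, unsigned long num_pages)
{
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_pgoff = 0;	/* map from the start of the array */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	return vm_map_pages(vma, pages, num_pages);
}
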
vma              1090 drivers/hsi/clients/cmt_speech.c 	struct cs_char *csdata = vmf->vma->vm_private_data;
vma              1260 drivers/hsi/clients/cmt_speech.c static int cs_char_mmap(struct file *file, struct vm_area_struct *vma)
vma              1262 drivers/hsi/clients/cmt_speech.c 	if (vma->vm_end < vma->vm_start)
vma              1265 drivers/hsi/clients/cmt_speech.c 	if (vma_pages(vma) != 1)
vma              1268 drivers/hsi/clients/cmt_speech.c 	vma->vm_flags |= VM_IO | VM_DONTDUMP | VM_DONTEXPAND;
vma              1269 drivers/hsi/clients/cmt_speech.c 	vma->vm_ops = &cs_char_vm_ops;
vma              1270 drivers/hsi/clients/cmt_speech.c 	vma->vm_private_data = file->private_data;
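
cs_char_mmap() above is the usual shape of a single-page char-device mapping: reject anything but exactly one page, pin the VMA down, and let a .fault handler supply the page. A sketch with placeholder ops (my_fault here only signals SIGBUS; a real driver would insert its page):

#include <linux/fs.h>
#include <linux/mm.h>

static vm_fault_t my_fault(struct vm_fault *vmf)
{
	/* Placeholder: a real handler would vmf_insert_page() here. */
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct my_single_page_vm_ops = {
	.fault = my_fault,
};

static int my_char_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (vma->vm_pgoff || vma_pages(vma) != 1)
		return -EINVAL;	/* exactly one page, no offset */

	vma->vm_flags |= VM_IO | VM_DONTDUMP | VM_DONTEXPAND;
	vma->vm_ops = &my_single_page_vm_ops;
	vma->vm_private_data = file->private_data;

	return 0;
}
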
vma              1547 drivers/hwtracing/intel_th/msu.c static void msc_mmap_open(struct vm_area_struct *vma)
vma              1549 drivers/hwtracing/intel_th/msu.c 	struct msc_iter *iter = vma->vm_file->private_data;
vma              1555 drivers/hwtracing/intel_th/msu.c static void msc_mmap_close(struct vm_area_struct *vma)
vma              1557 drivers/hwtracing/intel_th/msu.c 	struct msc_iter *iter = vma->vm_file->private_data;
vma              1582 drivers/hwtracing/intel_th/msu.c 	struct msc_iter *iter = vmf->vma->vm_file->private_data;
vma              1590 drivers/hwtracing/intel_th/msu.c 	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
vma              1602 drivers/hwtracing/intel_th/msu.c static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
vma              1604 drivers/hwtracing/intel_th/msu.c 	unsigned long size = vma->vm_end - vma->vm_start;
vma              1605 drivers/hwtracing/intel_th/msu.c 	struct msc_iter *iter = vma->vm_file->private_data;
vma              1612 drivers/hwtracing/intel_th/msu.c 	if (vma->vm_pgoff)
vma              1633 drivers/hwtracing/intel_th/msu.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma              1634 drivers/hwtracing/intel_th/msu.c 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
vma              1635 drivers/hwtracing/intel_th/msu.c 	vma->vm_ops = &msc_mmap_ops;
vma               669 drivers/hwtracing/stm/core.c static void stm_mmap_open(struct vm_area_struct *vma)
vma               671 drivers/hwtracing/stm/core.c 	struct stm_file *stmf = vma->vm_file->private_data;
vma               677 drivers/hwtracing/stm/core.c static void stm_mmap_close(struct vm_area_struct *vma)
vma               679 drivers/hwtracing/stm/core.c 	struct stm_file *stmf = vma->vm_file->private_data;
vma               691 drivers/hwtracing/stm/core.c static int stm_char_mmap(struct file *file, struct vm_area_struct *vma)
vma               700 drivers/hwtracing/stm/core.c 	if (vma->vm_pgoff)
vma               703 drivers/hwtracing/stm/core.c 	size = vma->vm_end - vma->vm_start;
vma               717 drivers/hwtracing/stm/core.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               718 drivers/hwtracing/stm/core.c 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma               719 drivers/hwtracing/stm/core.c 	vma->vm_ops = &stm_mmap_vmops;
vma               720 drivers/hwtracing/stm/core.c 	vm_iomap_memory(vma, phys, size);
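
stm_char_mmap() above maps MMIO through vm_iomap_memory(), which validates the VMA's size and offset against the physical range before remapping (note the listed line appears to drop the return value; a stricter version propagates it). A sketch:

#include <linux/mm.h>

static int my_mmio_mmap(struct vm_area_struct *vma, phys_addr_t phys,
			unsigned long size)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;

	/* Checks the VMA against [phys, phys + size) for us. */
	return vm_iomap_memory(vma, phys, size);
}
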
vma               269 drivers/infiniband/core/umem.c 		goto vma;
vma               316 drivers/infiniband/core/umem.c vma:
vma               417 drivers/infiniband/core/umem_odp.c 		struct vm_area_struct *vma;
vma               421 drivers/infiniband/core/umem_odp.c 		vma = find_vma(mm, ib_umem_start(umem_odp));
vma               422 drivers/infiniband/core/umem_odp.c 		if (!vma || !is_vm_hugetlb_page(vma)) {
vma               427 drivers/infiniband/core/umem_odp.c 		h = hstate_vma(vma);
vma               773 drivers/infiniband/core/uverbs_main.c static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
vma               787 drivers/infiniband/core/uverbs_main.c 	ret = ucontext->device->ops.mmap(ucontext, vma);
vma               805 drivers/infiniband/core/uverbs_main.c 	struct vm_area_struct *vma;
vma               812 drivers/infiniband/core/uverbs_main.c 				struct vm_area_struct *vma)
vma               814 drivers/infiniband/core/uverbs_main.c 	struct ib_uverbs_file *ufile = vma->vm_file->private_data;
vma               816 drivers/infiniband/core/uverbs_main.c 	priv->vma = vma;
vma               817 drivers/infiniband/core/uverbs_main.c 	vma->vm_private_data = priv;
vma               818 drivers/infiniband/core/uverbs_main.c 	vma->vm_ops = &rdma_umap_ops;
vma               829 drivers/infiniband/core/uverbs_main.c static void rdma_umap_open(struct vm_area_struct *vma)
vma               831 drivers/infiniband/core/uverbs_main.c 	struct ib_uverbs_file *ufile = vma->vm_file->private_data;
vma               832 drivers/infiniband/core/uverbs_main.c 	struct rdma_umap_priv *opriv = vma->vm_private_data;
vma               850 drivers/infiniband/core/uverbs_main.c 	rdma_umap_priv_init(priv, vma);
vma               863 drivers/infiniband/core/uverbs_main.c 	vma->vm_private_data = NULL;
vma               864 drivers/infiniband/core/uverbs_main.c 	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
vma               867 drivers/infiniband/core/uverbs_main.c static void rdma_umap_close(struct vm_area_struct *vma)
vma               869 drivers/infiniband/core/uverbs_main.c 	struct ib_uverbs_file *ufile = vma->vm_file->private_data;
vma               870 drivers/infiniband/core/uverbs_main.c 	struct rdma_umap_priv *priv = vma->vm_private_data;
vma               892 drivers/infiniband/core/uverbs_main.c 	struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;
vma               893 drivers/infiniband/core/uverbs_main.c 	struct rdma_umap_priv *priv = vmf->vma->vm_private_data;
vma               900 drivers/infiniband/core/uverbs_main.c 	if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
vma               937 drivers/infiniband/core/uverbs_main.c int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
vma               943 drivers/infiniband/core/uverbs_main.c 	if (!(vma->vm_flags & VM_SHARED))
vma               946 drivers/infiniband/core/uverbs_main.c 	if (vma->vm_end - vma->vm_start != size)
vma               950 drivers/infiniband/core/uverbs_main.c 	if (WARN_ON(!vma->vm_file ||
vma               951 drivers/infiniband/core/uverbs_main.c 		    vma->vm_file->private_data != ufile))
vma               959 drivers/infiniband/core/uverbs_main.c 	vma->vm_page_prot = prot;
vma               960 drivers/infiniband/core/uverbs_main.c 	if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
vma               965 drivers/infiniband/core/uverbs_main.c 	rdma_umap_priv_init(priv, vma);
vma               986 drivers/infiniband/core/uverbs_main.c 			mm = priv->vma->vm_mm;
vma              1011 drivers/infiniband/core/uverbs_main.c 			struct vm_area_struct *vma = priv->vma;
vma              1013 drivers/infiniband/core/uverbs_main.c 			if (vma->vm_mm != mm)
vma              1017 drivers/infiniband/core/uverbs_main.c 			zap_vma_ptes(vma, vma->vm_start,
vma              1018 drivers/infiniband/core/uverbs_main.c 				     vma->vm_end - vma->vm_start);
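
The uverbs lines above implement mapping revocation: each io mapping is tracked in a per-VMA priv record so that, when the device is disassociated or hot-unplugged, its PTEs can be torn down and subsequent faults SIGBUS instead of touching dead hardware. The core step, assuming 'vma' is one such tracked mapping:

#include <linux/mm.h>

static void my_revoke_mapping(struct vm_area_struct *vma)
{
	vma->vm_private_data = NULL;	/* mark the mapping revoked */
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
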
vma              3686 drivers/infiniband/hw/bnxt_re/ib_verbs.c int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
vma              3694 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
vma              3697 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	if (vma->vm_pgoff) {
vma              3698 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma              3699 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma              3700 drivers/infiniband/hw/bnxt_re/ib_verbs.c 				       PAGE_SIZE, vma->vm_page_prot)) {
vma              3706 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		if (remap_pfn_range(vma, vma->vm_start,
vma              3707 drivers/infiniband/hw/bnxt_re/ib_verbs.c 				    pfn, PAGE_SIZE, vma->vm_page_prot)) {
vma               213 drivers/infiniband/hw/bnxt_re/ib_verbs.h int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
vma               237 drivers/infiniband/hw/cxgb3/iwch_provider.c static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
vma               239 drivers/infiniband/hw/cxgb3/iwch_provider.c 	int len = vma->vm_end - vma->vm_start;
vma               240 drivers/infiniband/hw/cxgb3/iwch_provider.c 	u32 key = vma->vm_pgoff << PAGE_SHIFT;
vma               247 drivers/infiniband/hw/cxgb3/iwch_provider.c 	pr_debug("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
vma               250 drivers/infiniband/hw/cxgb3/iwch_provider.c 	if (vma->vm_start & (PAGE_SIZE-1)) {
vma               270 drivers/infiniband/hw/cxgb3/iwch_provider.c 		if (vma->vm_flags & VM_READ) {
vma               274 drivers/infiniband/hw/cxgb3/iwch_provider.c 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               275 drivers/infiniband/hw/cxgb3/iwch_provider.c 		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
vma               276 drivers/infiniband/hw/cxgb3/iwch_provider.c 		vma->vm_flags &= ~VM_MAYREAD;
vma               277 drivers/infiniband/hw/cxgb3/iwch_provider.c 		ret = io_remap_pfn_range(vma, vma->vm_start,
vma               279 drivers/infiniband/hw/cxgb3/iwch_provider.c 				         len, vma->vm_page_prot);
vma               285 drivers/infiniband/hw/cxgb3/iwch_provider.c 		ret = remap_pfn_range(vma, vma->vm_start,
vma               287 drivers/infiniband/hw/cxgb3/iwch_provider.c 				      len, vma->vm_page_prot);
vma               124 drivers/infiniband/hw/cxgb4/provider.c static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
vma               126 drivers/infiniband/hw/cxgb4/provider.c 	int len = vma->vm_end - vma->vm_start;
vma               127 drivers/infiniband/hw/cxgb4/provider.c 	u32 key = vma->vm_pgoff << PAGE_SHIFT;
vma               134 drivers/infiniband/hw/cxgb4/provider.c 	pr_debug("pgoff 0x%lx key 0x%x len %d\n", vma->vm_pgoff,
vma               137 drivers/infiniband/hw/cxgb4/provider.c 	if (vma->vm_start & (PAGE_SIZE-1))
vma               156 drivers/infiniband/hw/cxgb4/provider.c 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               157 drivers/infiniband/hw/cxgb4/provider.c 		ret = io_remap_pfn_range(vma, vma->vm_start,
vma               159 drivers/infiniband/hw/cxgb4/provider.c 					 len, vma->vm_page_prot);
vma               168 drivers/infiniband/hw/cxgb4/provider.c 			vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
vma               171 drivers/infiniband/hw/cxgb4/provider.c 				vma->vm_page_prot =
vma               172 drivers/infiniband/hw/cxgb4/provider.c 					t4_pgprot_wc(vma->vm_page_prot);
vma               174 drivers/infiniband/hw/cxgb4/provider.c 				vma->vm_page_prot =
vma               175 drivers/infiniband/hw/cxgb4/provider.c 					pgprot_noncached(vma->vm_page_prot);
vma               177 drivers/infiniband/hw/cxgb4/provider.c 		ret = io_remap_pfn_range(vma, vma->vm_start,
vma               179 drivers/infiniband/hw/cxgb4/provider.c 					 len, vma->vm_page_prot);
vma               185 drivers/infiniband/hw/cxgb4/provider.c 		ret = remap_pfn_range(vma, vma->vm_start,
vma               187 drivers/infiniband/hw/cxgb4/provider.c 				      len, vma->vm_page_prot);
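
The cxgb3/cxgb4 handlers above treat vm_pgoff as an opaque key (vm_pgoff << PAGE_SHIFT) selecting which hardware region to map, rather than as a real file offset. Once the key is resolved, the remap itself is a short sketch like the following; the caller is assumed to have translated the key to a bus address 'addr' via its per-context mmap table:

#include <linux/mm.h>

static int my_mmap_hw_region(struct vm_area_struct *vma, u64 addr)
{
	unsigned long len = vma->vm_end - vma->vm_start;

	/* addr was looked up from the key vm_pgoff << PAGE_SHIFT. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
				  len, vma->vm_page_prot);
}
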
vma               149 drivers/infiniband/hw/efa/efa.h 	     struct vm_area_struct *vma);
vma              1593 drivers/infiniband/hw/efa/efa_verbs.c 		      struct vm_area_struct *vma, u64 key, u64 length)
vma              1614 drivers/infiniband/hw/efa/efa_verbs.c 		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, length,
vma              1615 drivers/infiniband/hw/efa/efa_verbs.c 					pgprot_noncached(vma->vm_page_prot));
vma              1618 drivers/infiniband/hw/efa/efa_verbs.c 		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, length,
vma              1619 drivers/infiniband/hw/efa/efa_verbs.c 					pgprot_writecombine(vma->vm_page_prot));
vma              1622 drivers/infiniband/hw/efa/efa_verbs.c 		for (va = vma->vm_start; va < vma->vm_end;
vma              1624 drivers/infiniband/hw/efa/efa_verbs.c 			err = vm_insert_page(vma, va, pfn_to_page(pfn));
vma              1645 drivers/infiniband/hw/efa/efa_verbs.c 	     struct vm_area_struct *vma)
vma              1649 drivers/infiniband/hw/efa/efa_verbs.c 	u64 length = vma->vm_end - vma->vm_start;
vma              1650 drivers/infiniband/hw/efa/efa_verbs.c 	u64 key = vma->vm_pgoff << PAGE_SHIFT;
vma              1654 drivers/infiniband/hw/efa/efa_verbs.c 		  vma->vm_start, vma->vm_end, length, key);
vma              1656 drivers/infiniband/hw/efa/efa_verbs.c 	if (length % PAGE_SIZE != 0 || !(vma->vm_flags & VM_SHARED)) {
vma              1659 drivers/infiniband/hw/efa/efa_verbs.c 			  length, PAGE_SIZE, vma->vm_flags);
vma              1663 drivers/infiniband/hw/efa/efa_verbs.c 	if (vma->vm_flags & VM_EXEC) {
vma              1668 drivers/infiniband/hw/efa/efa_verbs.c 	return __efa_mmap(dev, ucontext, vma, key, length);
vma                78 drivers/infiniband/hw/hfi1/file_ops.c static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma);
vma               350 drivers/infiniband/hw/hfi1/file_ops.c static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
vma               356 drivers/infiniband/hw/hfi1/file_ops.c 	u64 token = vma->vm_pgoff << PAGE_SHIFT,
vma               365 drivers/infiniband/hw/hfi1/file_ops.c 	    !(vma->vm_flags & VM_SHARED)) {
vma               378 drivers/infiniband/hw/hfi1/file_ops.c 	flags = vma->vm_flags;
vma               396 drivers/infiniband/hw/hfi1/file_ops.c 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma               437 drivers/infiniband/hw/hfi1/file_ops.c 		if ((vma->vm_end - vma->vm_start) != memlen) {
vma               439 drivers/infiniband/hw/hfi1/file_ops.c 				   (vma->vm_end - vma->vm_start), memlen);
vma               443 drivers/infiniband/hw/hfi1/file_ops.c 		if (vma->vm_flags & VM_WRITE) {
vma               447 drivers/infiniband/hw/hfi1/file_ops.c 		vma->vm_flags &= ~VM_MAYWRITE;
vma               448 drivers/infiniband/hw/hfi1/file_ops.c 		addr = vma->vm_start;
vma               453 drivers/infiniband/hw/hfi1/file_ops.c 				vma, addr,
vma               461 drivers/infiniband/hw/hfi1/file_ops.c 				vma->vm_page_prot);
vma               483 drivers/infiniband/hw/hfi1/file_ops.c 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               564 drivers/infiniband/hw/hfi1/file_ops.c 	if ((vma->vm_end - vma->vm_start) != memlen) {
vma               567 drivers/infiniband/hw/hfi1/file_ops.c 			  (vma->vm_end - vma->vm_start), memlen);
vma               572 drivers/infiniband/hw/hfi1/file_ops.c 	vma->vm_flags = flags;
vma               576 drivers/infiniband/hw/hfi1/file_ops.c 		    vma->vm_end - vma->vm_start, vma->vm_flags);
vma               578 drivers/infiniband/hw/hfi1/file_ops.c 		vma->vm_pgoff = PFN_DOWN(memaddr);
vma               579 drivers/infiniband/hw/hfi1/file_ops.c 		vma->vm_ops = &vm_ops;
vma               582 drivers/infiniband/hw/hfi1/file_ops.c 		ret = io_remap_pfn_range(vma, vma->vm_start,
vma               585 drivers/infiniband/hw/hfi1/file_ops.c 					 vma->vm_page_prot);
vma               587 drivers/infiniband/hw/hfi1/file_ops.c 		ret = remap_pfn_range(vma, vma->vm_start,
vma               590 drivers/infiniband/hw/hfi1/file_ops.c 				      vma->vm_page_prot);
vma               592 drivers/infiniband/hw/hfi1/file_ops.c 		ret = remap_pfn_range(vma, vma->vm_start,
vma               595 drivers/infiniband/hw/hfi1/file_ops.c 				      vma->vm_page_prot);
vma               353 drivers/infiniband/hw/hns/hns_roce_main.c 			 struct vm_area_struct *vma)
vma               357 drivers/infiniband/hw/hns/hns_roce_main.c 	switch (vma->vm_pgoff) {
vma               359 drivers/infiniband/hw/hns/hns_roce_main.c 		return rdma_user_mmap_io(context, vma,
vma               362 drivers/infiniband/hw/hns/hns_roce_main.c 					 pgprot_noncached(vma->vm_page_prot));
vma               372 drivers/infiniband/hw/hns/hns_roce_main.c 		return rdma_user_mmap_io(context, vma,
vma               375 drivers/infiniband/hw/hns/hns_roce_main.c 					 vma->vm_page_prot);
vma               169 drivers/infiniband/hw/i40iw/i40iw_verbs.c static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
vma               179 drivers/infiniband/hw/i40iw/i40iw_verbs.c 		if (vma->vm_pgoff)
vma               180 drivers/infiniband/hw/i40iw/i40iw_verbs.c 			vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
vma               184 drivers/infiniband/hw/i40iw/i40iw_verbs.c 		if (vma->vm_pgoff)
vma               185 drivers/infiniband/hw/i40iw/i40iw_verbs.c 			vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
vma               188 drivers/infiniband/hw/i40iw/i40iw_verbs.c 	vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;
vma               190 drivers/infiniband/hw/i40iw/i40iw_verbs.c 	if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
vma               191 drivers/infiniband/hw/i40iw/i40iw_verbs.c 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               192 drivers/infiniband/hw/i40iw/i40iw_verbs.c 		vma->vm_private_data = ucontext;
vma               194 drivers/infiniband/hw/i40iw/i40iw_verbs.c 		if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
vma               195 drivers/infiniband/hw/i40iw/i40iw_verbs.c 			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               197 drivers/infiniband/hw/i40iw/i40iw_verbs.c 			vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma               200 drivers/infiniband/hw/i40iw/i40iw_verbs.c 	if (io_remap_pfn_range(vma, vma->vm_start,
vma               201 drivers/infiniband/hw/i40iw/i40iw_verbs.c 			       vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
vma               202 drivers/infiniband/hw/i40iw/i40iw_verbs.c 			       PAGE_SIZE, vma->vm_page_prot))
vma              1152 drivers/infiniband/hw/mlx4/main.c static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
vma              1156 drivers/infiniband/hw/mlx4/main.c 	switch (vma->vm_pgoff) {
vma              1158 drivers/infiniband/hw/mlx4/main.c 		return rdma_user_mmap_io(context, vma,
vma              1161 drivers/infiniband/hw/mlx4/main.c 					 pgprot_noncached(vma->vm_page_prot));
vma              1167 drivers/infiniband/hw/mlx4/main.c 			context, vma,
vma              1170 drivers/infiniband/hw/mlx4/main.c 			PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot));
vma              1181 drivers/infiniband/hw/mlx4/main.c 			context, vma,
vma              1186 drivers/infiniband/hw/mlx4/main.c 			PAGE_SIZE, pgprot_noncached(vma->vm_page_prot));
vma               381 drivers/infiniband/hw/mlx4/mr.c 		struct vm_area_struct *vma;
vma               389 drivers/infiniband/hw/mlx4/mr.c 		vma = find_vma(current->mm, untagged_start);
vma               390 drivers/infiniband/hw/mlx4/mr.c 		if (vma && vma->vm_end >= untagged_start + length &&
vma               391 drivers/infiniband/hw/mlx4/mr.c 		    vma->vm_start <= untagged_start) {
vma               392 drivers/infiniband/hw/mlx4/mr.c 			if (vma->vm_flags & VM_WRITE)
vma              2059 drivers/infiniband/hw/mlx5/main.c 					struct vm_area_struct *vma,
vma              2062 drivers/infiniband/hw/mlx5/main.c 	if ((vma->vm_end - vma->vm_start != PAGE_SIZE) ||
vma              2063 drivers/infiniband/hw/mlx5/main.c 	    !(vma->vm_flags & VM_SHARED))
vma              2066 drivers/infiniband/hw/mlx5/main.c 	if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
vma              2069 drivers/infiniband/hw/mlx5/main.c 	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
vma              2071 drivers/infiniband/hw/mlx5/main.c 	vma->vm_flags &= ~VM_MAYWRITE;
vma              2076 drivers/infiniband/hw/mlx5/main.c 	return vm_insert_page(vma, vma->vm_start,
vma              2081 drivers/infiniband/hw/mlx5/main.c 		    struct vm_area_struct *vma,
vma              2095 drivers/infiniband/hw/mlx5/main.c 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
vma              2099 drivers/infiniband/hw/mlx5/main.c 		idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
vma              2101 drivers/infiniband/hw/mlx5/main.c 		idx = get_index(vma->vm_pgoff);
vma              2122 drivers/infiniband/hw/mlx5/main.c 		prot = pgprot_writecombine(vma->vm_page_prot);
vma              2125 drivers/infiniband/hw/mlx5/main.c 		prot = pgprot_noncached(vma->vm_page_prot);
vma              2167 drivers/infiniband/hw/mlx5/main.c 	err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
vma              2192 drivers/infiniband/hw/mlx5/main.c static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
vma              2196 drivers/infiniband/hw/mlx5/main.c 	u16 page_idx = get_extended_index(vma->vm_pgoff);
vma              2197 drivers/infiniband/hw/mlx5/main.c 	size_t map_size = vma->vm_end - vma->vm_start;
vma              2209 drivers/infiniband/hw/mlx5/main.c 	return rdma_user_mmap_io(context, vma, pfn, map_size,
vma              2210 drivers/infiniband/hw/mlx5/main.c 				 pgprot_writecombine(vma->vm_page_prot));
vma              2213 drivers/infiniband/hw/mlx5/main.c static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
vma              2220 drivers/infiniband/hw/mlx5/main.c 	command = get_command(vma->vm_pgoff);
vma              2226 drivers/infiniband/hw/mlx5/main.c 		return uar_mmap(dev, command, vma, context);
vma              2232 drivers/infiniband/hw/mlx5/main.c 		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
vma              2235 drivers/infiniband/hw/mlx5/main.c 		if (vma->vm_flags & VM_WRITE)
vma              2237 drivers/infiniband/hw/mlx5/main.c 		vma->vm_flags &= ~VM_MAYWRITE;
vma              2246 drivers/infiniband/hw/mlx5/main.c 		return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
vma              2248 drivers/infiniband/hw/mlx5/main.c 					 pgprot_noncached(vma->vm_page_prot));
vma              2250 drivers/infiniband/hw/mlx5/main.c 		return mlx5_ib_mmap_clock_info_page(dev, vma, context);
vma              2253 drivers/infiniband/hw/mlx5/main.c 		return dm_mmap(ibcontext, vma);
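
mlx5's clock-info path above is the canonical way to export a kernel page to userspace read-only: refuse VM_WRITE/VM_EXEC up front, clear VM_MAYWRITE so a later mprotect(PROT_WRITE) also fails, then vm_insert_page(). A sketch:

#include <linux/mm.h>

static int my_map_ro_page(struct vm_area_struct *vma, struct page *page)
{
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;
	vma->vm_flags &= ~VM_MAYWRITE;	/* block mprotect(PROT_WRITE) too */

	return vm_insert_page(vma, vma->vm_start, page);
}
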
vma               351 drivers/infiniband/hw/mthca/mthca_provider.c 			  struct vm_area_struct *vma)
vma               353 drivers/infiniband/hw/mthca/mthca_provider.c 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
vma               356 drivers/infiniband/hw/mthca/mthca_provider.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               358 drivers/infiniband/hw/mthca/mthca_provider.c 	if (io_remap_pfn_range(vma, vma->vm_start,
vma               360 drivers/infiniband/hw/mthca/mthca_provider.c 			       PAGE_SIZE, vma->vm_page_prot))
vma               545 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
vma               549 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
vma               551 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	unsigned long len = (vma->vm_end - vma->vm_start);
vma               555 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	if (vma->vm_start & (PAGE_SIZE - 1))
vma               557 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
vma               564 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		if (vma->vm_flags & VM_READ)
vma               567 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               568 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma               569 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 					    len, vma->vm_page_prot);
vma               575 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		if (vma->vm_flags & VM_READ)
vma               578 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma               579 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma               580 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 					    len, vma->vm_page_prot);
vma               582 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		status = remap_pfn_range(vma, vma->vm_start,
vma               583 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 					 vma->vm_pgoff, len, vma->vm_page_prot);
vma                69 drivers/infiniband/hw/ocrdma/ocrdma_verbs.h int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
vma               387 drivers/infiniband/hw/qedr/verbs.c int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
vma               391 drivers/infiniband/hw/qedr/verbs.c 	unsigned long phys_addr = vma->vm_pgoff << PAGE_SHIFT;
vma               392 drivers/infiniband/hw/qedr/verbs.c 	unsigned long len = (vma->vm_end - vma->vm_start);
vma               399 drivers/infiniband/hw/qedr/verbs.c 		 (void *)vma->vm_start, (void *)vma->vm_end,
vma               400 drivers/infiniband/hw/qedr/verbs.c 		 (void *)vma->vm_pgoff, (void *)dpi_start, ucontext->dpi_size);
vma               402 drivers/infiniband/hw/qedr/verbs.c 	if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
vma               405 drivers/infiniband/hw/qedr/verbs.c 		       (void *)vma->vm_start, (void *)vma->vm_end);
vma               411 drivers/infiniband/hw/qedr/verbs.c 		       vma->vm_pgoff);
vma               424 drivers/infiniband/hw/qedr/verbs.c 	if (vma->vm_flags & VM_READ) {
vma               429 drivers/infiniband/hw/qedr/verbs.c 	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma               430 drivers/infiniband/hw/qedr/verbs.c 	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len,
vma               431 drivers/infiniband/hw/qedr/verbs.c 				  vma->vm_page_prot);
vma                49 drivers/infiniband/hw/qedr/verbs.h int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
vma               708 drivers/infiniband/hw/qib/qib_file_ops.c static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
vma               715 drivers/infiniband/hw/qib/qib_file_ops.c 	if ((vma->vm_end - vma->vm_start) > len) {
vma               718 drivers/infiniband/hw/qib/qib_file_ops.c 			 vma->vm_end - vma->vm_start, len);
vma               728 drivers/infiniband/hw/qib/qib_file_ops.c 		if (vma->vm_flags & VM_WRITE) {
vma               736 drivers/infiniband/hw/qib/qib_file_ops.c 		vma->vm_flags &= ~VM_MAYWRITE;
vma               740 drivers/infiniband/hw/qib/qib_file_ops.c 	ret = remap_pfn_range(vma, vma->vm_start, pfn,
vma               741 drivers/infiniband/hw/qib/qib_file_ops.c 			      len, vma->vm_page_prot);
vma               750 drivers/infiniband/hw/qib/qib_file_ops.c static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
vma               763 drivers/infiniband/hw/qib/qib_file_ops.c 	if ((vma->vm_end - vma->vm_start) > sz) {
vma               766 drivers/infiniband/hw/qib/qib_file_ops.c 			vma->vm_end - vma->vm_start);
vma               770 drivers/infiniband/hw/qib/qib_file_ops.c 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               772 drivers/infiniband/hw/qib/qib_file_ops.c 		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
vma               773 drivers/infiniband/hw/qib/qib_file_ops.c 		ret = io_remap_pfn_range(vma, vma->vm_start,
vma               775 drivers/infiniband/hw/qib/qib_file_ops.c 					 vma->vm_end - vma->vm_start,
vma               776 drivers/infiniband/hw/qib/qib_file_ops.c 					 vma->vm_page_prot);
vma               781 drivers/infiniband/hw/qib/qib_file_ops.c static int mmap_piobufs(struct vm_area_struct *vma,
vma               795 drivers/infiniband/hw/qib/qib_file_ops.c 	if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
vma               798 drivers/infiniband/hw/qib/qib_file_ops.c 			 vma->vm_end - vma->vm_start);
vma               806 drivers/infiniband/hw/qib/qib_file_ops.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               813 drivers/infiniband/hw/qib/qib_file_ops.c 	vma->vm_flags &= ~VM_MAYREAD;
vma               814 drivers/infiniband/hw/qib/qib_file_ops.c 	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
vma               818 drivers/infiniband/hw/qib/qib_file_ops.c 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma               820 drivers/infiniband/hw/qib/qib_file_ops.c 	ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
vma               821 drivers/infiniband/hw/qib/qib_file_ops.c 				 vma->vm_end - vma->vm_start,
vma               822 drivers/infiniband/hw/qib/qib_file_ops.c 				 vma->vm_page_prot);
vma               827 drivers/infiniband/hw/qib/qib_file_ops.c static int mmap_rcvegrbufs(struct vm_area_struct *vma,
vma               838 drivers/infiniband/hw/qib/qib_file_ops.c 	if ((vma->vm_end - vma->vm_start) > total_size) {
vma               841 drivers/infiniband/hw/qib/qib_file_ops.c 			 vma->vm_end - vma->vm_start,
vma               847 drivers/infiniband/hw/qib/qib_file_ops.c 	if (vma->vm_flags & VM_WRITE) {
vma               850 drivers/infiniband/hw/qib/qib_file_ops.c 			vma->vm_flags);
vma               855 drivers/infiniband/hw/qib/qib_file_ops.c 	vma->vm_flags &= ~VM_MAYWRITE;
vma               857 drivers/infiniband/hw/qib/qib_file_ops.c 	start = vma->vm_start;
vma               861 drivers/infiniband/hw/qib/qib_file_ops.c 		ret = remap_pfn_range(vma, start, pfn, size,
vma               862 drivers/infiniband/hw/qib/qib_file_ops.c 				      vma->vm_page_prot);
vma               893 drivers/infiniband/hw/qib/qib_file_ops.c static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
vma               936 drivers/infiniband/hw/qib/qib_file_ops.c 		if (vma->vm_flags & VM_WRITE) {
vma               939 drivers/infiniband/hw/qib/qib_file_ops.c 				 vma->vm_flags);
vma               947 drivers/infiniband/hw/qib/qib_file_ops.c 		vma->vm_flags &= ~VM_MAYWRITE;
vma               950 drivers/infiniband/hw/qib/qib_file_ops.c 	len = vma->vm_end - vma->vm_start;
vma               956 drivers/infiniband/hw/qib/qib_file_ops.c 	vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
vma               957 drivers/infiniband/hw/qib/qib_file_ops.c 	vma->vm_ops = &qib_file_vm_ops;
vma               958 drivers/infiniband/hw/qib/qib_file_ops.c 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma               975 drivers/infiniband/hw/qib/qib_file_ops.c static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
vma               984 drivers/infiniband/hw/qib/qib_file_ops.c 	if (!rcd || !(vma->vm_flags & VM_SHARED)) {
vma               999 drivers/infiniband/hw/qib/qib_file_ops.c 	pgaddr = vma->vm_pgoff << PAGE_SHIFT;
vma              1015 drivers/infiniband/hw/qib/qib_file_ops.c 	ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));
vma              1042 drivers/infiniband/hw/qib/qib_file_ops.c 		ret = mmap_ureg(vma, dd, ureg);
vma              1044 drivers/infiniband/hw/qib/qib_file_ops.c 		ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
vma              1047 drivers/infiniband/hw/qib/qib_file_ops.c 		ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
vma              1051 drivers/infiniband/hw/qib/qib_file_ops.c 		ret = mmap_rcvegrbufs(vma, rcd);
vma              1058 drivers/infiniband/hw/qib/qib_file_ops.c 		ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
vma              1062 drivers/infiniband/hw/qib/qib_file_ops.c 		ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
vma              1070 drivers/infiniband/hw/qib/qib_file_ops.c 	vma->vm_private_data = NULL;
vma              1076 drivers/infiniband/hw/qib/qib_file_ops.c 			 vma->vm_end - vma->vm_start);
vma               673 drivers/infiniband/hw/usnic/usnic_ib_verbs.c 				struct vm_area_struct *vma)
vma               687 drivers/infiniband/hw/usnic/usnic_ib_verbs.c 	vma->vm_flags |= VM_IO;
vma               688 drivers/infiniband/hw/usnic/usnic_ib_verbs.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               689 drivers/infiniband/hw/usnic/usnic_ib_verbs.c 	vfid = vma->vm_pgoff;
vma               691 drivers/infiniband/hw/usnic/usnic_ib_verbs.c 			vma->vm_pgoff, PAGE_SHIFT, vfid);
vma               698 drivers/infiniband/hw/usnic/usnic_ib_verbs.c 			if ((vma->vm_end - vma->vm_start) != bar->len) {
vma               701 drivers/infiniband/hw/usnic/usnic_ib_verbs.c 						vma->vm_end - vma->vm_start);
vma               711 drivers/infiniband/hw/usnic/usnic_ib_verbs.c 			return remap_pfn_range(vma,
vma               712 drivers/infiniband/hw/usnic/usnic_ib_verbs.c 						vma->vm_start,
vma               714 drivers/infiniband/hw/usnic/usnic_ib_verbs.c 						len, vma->vm_page_prot);
vma                71 drivers/infiniband/hw/usnic/usnic_ib_verbs.h 			struct vm_area_struct *vma);
vma               395 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
vma               398 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	unsigned long start = vma->vm_start;
vma               399 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	unsigned long size = vma->vm_end - vma->vm_start;
vma               400 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
vma               411 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
vma               412 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               413 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 	if (io_remap_pfn_range(vma, start, context->uar.pfn, size,
vma               414 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 			       vma->vm_page_prot))
vma               398 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
vma                85 drivers/infiniband/sw/rdmavt/mmap.c static void rvt_vma_open(struct vm_area_struct *vma)
vma                87 drivers/infiniband/sw/rdmavt/mmap.c 	struct rvt_mmap_info *ip = vma->vm_private_data;
vma                92 drivers/infiniband/sw/rdmavt/mmap.c static void rvt_vma_close(struct vm_area_struct *vma)
vma                94 drivers/infiniband/sw/rdmavt/mmap.c 	struct rvt_mmap_info *ip = vma->vm_private_data;
vma               111 drivers/infiniband/sw/rdmavt/mmap.c int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
vma               114 drivers/infiniband/sw/rdmavt/mmap.c 	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
vma               115 drivers/infiniband/sw/rdmavt/mmap.c 	unsigned long size = vma->vm_end - vma->vm_start;
vma               137 drivers/infiniband/sw/rdmavt/mmap.c 		ret = remap_vmalloc_range(vma, ip->obj, 0);
vma               140 drivers/infiniband/sw/rdmavt/mmap.c 		vma->vm_ops = &rvt_vm_ops;
vma               141 drivers/infiniband/sw/rdmavt/mmap.c 		vma->vm_private_data = ip;
vma               142 drivers/infiniband/sw/rdmavt/mmap.c 		rvt_vma_open(vma);
vma                55 drivers/infiniband/sw/rdmavt/mmap.h int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
vma                98 drivers/infiniband/sw/rxe/rxe_loc.h int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
vma                66 drivers/infiniband/sw/rxe/rxe_mmap.c static void rxe_vma_open(struct vm_area_struct *vma)
vma                68 drivers/infiniband/sw/rxe/rxe_mmap.c 	struct rxe_mmap_info *ip = vma->vm_private_data;
vma                73 drivers/infiniband/sw/rxe/rxe_mmap.c static void rxe_vma_close(struct vm_area_struct *vma)
vma                75 drivers/infiniband/sw/rxe/rxe_mmap.c 	struct rxe_mmap_info *ip = vma->vm_private_data;
vma                91 drivers/infiniband/sw/rxe/rxe_mmap.c int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
vma                94 drivers/infiniband/sw/rxe/rxe_mmap.c 	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
vma                95 drivers/infiniband/sw/rxe/rxe_mmap.c 	unsigned long size = vma->vm_end - vma->vm_start;
vma               128 drivers/infiniband/sw/rxe/rxe_mmap.c 	ret = remap_vmalloc_range(vma, ip->obj, 0);
vma               134 drivers/infiniband/sw/rxe/rxe_mmap.c 	vma->vm_ops = &rxe_vm_ops;
vma               135 drivers/infiniband/sw/rxe/rxe_mmap.c 	vma->vm_private_data = ip;
vma               136 drivers/infiniband/sw/rxe/rxe_mmap.c 	rxe_vma_open(vma);
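
rdmavt and rxe above both map queue memory with remap_vmalloc_range(), which only accepts buffers allocated with vmalloc_user()/vmalloc_32_user() (they carry VM_USERMAP). A minimal sketch:

#include <linux/mm.h>
#include <linux/vmalloc.h>

static int my_mmap_vmalloc(struct vm_area_struct *vma, void *vbuf,
			   unsigned long size)
{
	if (vma->vm_end - vma->vm_start > PAGE_ALIGN(size))
		return -EINVAL;

	/* vbuf must come from vmalloc_user()/vmalloc_32_user(). */
	return remap_vmalloc_range(vma, vbuf, 0);
}
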
vma                69 drivers/infiniband/sw/siw/siw_verbs.c int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
vma                73 drivers/infiniband/sw/siw/siw_verbs.c 	unsigned long off = vma->vm_pgoff;
vma                74 drivers/infiniband/sw/siw/siw_verbs.c 	int size = vma->vm_end - vma->vm_start;
vma                80 drivers/infiniband/sw/siw/siw_verbs.c 	if (vma->vm_start & (PAGE_SIZE - 1)) {
vma                90 drivers/infiniband/sw/siw/siw_verbs.c 	rv = remap_vmalloc_range(vma, uobj->addr, 0);
vma                85 drivers/infiniband/sw/siw/siw_verbs.h int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma);
vma               456 drivers/iommu/amd_iommu_v2.c static bool access_error(struct vm_area_struct *vma, struct fault *fault)
vma               469 drivers/iommu/amd_iommu_v2.c 	return (requested & ~vma->vm_flags) != 0;
vma               475 drivers/iommu/amd_iommu_v2.c 	struct vm_area_struct *vma;
vma               491 drivers/iommu/amd_iommu_v2.c 	vma = find_extend_vma(mm, address);
vma               492 drivers/iommu/amd_iommu_v2.c 	if (!vma || address < vma->vm_start)
vma               497 drivers/iommu/amd_iommu_v2.c 	if (access_error(vma, fault))
vma               500 drivers/iommu/amd_iommu_v2.c 	ret = handle_mm_fault(vma, address, flags);
vma               647 drivers/iommu/dma-iommu.c 		struct vm_area_struct *vma)
vma               649 drivers/iommu/dma-iommu.c 	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
vma              1028 drivers/iommu/dma-iommu.c static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
vma              1033 drivers/iommu/dma-iommu.c 	unsigned long pfn, off = vma->vm_pgoff;
vma              1036 drivers/iommu/dma-iommu.c 	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
vma              1038 drivers/iommu/dma-iommu.c 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
vma              1041 drivers/iommu/dma-iommu.c 	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
vma              1048 drivers/iommu/dma-iommu.c 			return __iommu_dma_mmap(pages, size, vma);
vma              1054 drivers/iommu/dma-iommu.c 	return remap_pfn_range(vma, vma->vm_start, pfn + off,
vma              1055 drivers/iommu/dma-iommu.c 			       vma->vm_end - vma->vm_start,
vma              1056 drivers/iommu/dma-iommu.c 			       vma->vm_page_prot);
vma               507 drivers/iommu/intel-svm.c static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
vma               520 drivers/iommu/intel-svm.c 	return (requested & ~vma->vm_flags) != 0;
vma               545 drivers/iommu/intel-svm.c 		struct vm_area_struct *vma;
vma               596 drivers/iommu/intel-svm.c 		vma = find_extend_vma(svm->mm, address);
vma               597 drivers/iommu/intel-svm.c 		if (!vma || address < vma->vm_start)
vma               600 drivers/iommu/intel-svm.c 		if (access_error(vma, req))
vma               603 drivers/iommu/intel-svm.c 		ret = handle_mm_fault(vma, address,
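
amd_iommu_v2 and intel-svm above service device page requests against the CPU page tables: look up (and possibly extend) the VMA, check the requested access against vm_flags, then drive handle_mm_fault(). A condensed sketch of that loop body, with mm/address/write taken from the device's page request:

#include <linux/mm.h>
#include <linux/sched/mm.h>

static vm_fault_t my_service_dev_fault(struct mm_struct *mm,
				       unsigned long address, bool write)
{
	struct vm_area_struct *vma;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	down_read(&mm->mmap_sem);
	vma = find_extend_vma(mm, address);	/* may grow a stack VMA */
	if (!vma || address < vma->vm_start)
		goto out;			/* no VMA covers address */
	if (write && !(vma->vm_flags & VM_WRITE))
		goto out;			/* device exceeds VMA rights */

	ret = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0);
out:
	up_read(&mm->mmap_sem);
	return ret;
}
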
vma               576 drivers/md/dm-bufio.c 		io_req.mem.ptr.vma = (char *)b->data + offset;
vma               510 drivers/md/dm-io.c 		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
vma               512 drivers/md/dm-io.c 			dp->vma_invalidate_address = io_req->mem.ptr.vma;
vma               515 drivers/md/dm-io.c 		vm_dp_init(dp, io_req->mem.ptr.vma);
vma               477 drivers/md/dm-log.c 		lc->io_req.mem.ptr.vma = lc->disk_header;
vma               241 drivers/md/dm-snap-persistent.c 		.mem.ptr.vma = area,
vma               484 drivers/md/dm-writecache.c 		req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
vma               892 drivers/md/dm-writecache.c 	req.mem.ptr.vma = (char *)wc->memory_map;
vma               291 drivers/media/common/saa7146/saa7146_fops.c static int fops_mmap(struct file *file, struct vm_area_struct * vma)
vma               301 drivers/media/common/saa7146/saa7146_fops.c 		       file, vma);
vma               307 drivers/media/common/saa7146/saa7146_fops.c 		       file, vma);
vma               319 drivers/media/common/saa7146/saa7146_fops.c 	res = videobuf_mmap_mapper(q, vma);
vma              1143 drivers/media/common/siano/smscoreapi.h 				      struct vm_area_struct *vma);
vma              2147 drivers/media/common/videobuf2/videobuf2-core.c int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
vma              2149 drivers/media/common/videobuf2/videobuf2-core.c 	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
vma              2163 drivers/media/common/videobuf2/videobuf2-core.c 	if (!(vma->vm_flags & VM_SHARED)) {
vma              2168 drivers/media/common/videobuf2/videobuf2-core.c 		if (!(vma->vm_flags & VM_WRITE)) {
vma              2173 drivers/media/common/videobuf2/videobuf2-core.c 		if (!(vma->vm_flags & VM_READ)) {
vma              2202 drivers/media/common/videobuf2/videobuf2-core.c 	if (length < (vma->vm_end - vma->vm_start)) {
vma              2214 drivers/media/common/videobuf2/videobuf2-core.c 	vma->vm_pgoff = 0;
vma              2216 drivers/media/common/videobuf2/videobuf2-core.c 	ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
vma               179 drivers/media/common/videobuf2/videobuf2-dma-contig.c static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
vma               189 drivers/media/common/videobuf2/videobuf2-dma-contig.c 	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
vma               197 drivers/media/common/videobuf2/videobuf2-dma-contig.c 	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
vma               198 drivers/media/common/videobuf2/videobuf2-dma-contig.c 	vma->vm_private_data	= &buf->handler;
vma               199 drivers/media/common/videobuf2/videobuf2-dma-contig.c 	vma->vm_ops		= &vb2_common_vm_ops;
vma               201 drivers/media/common/videobuf2/videobuf2-dma-contig.c 	vma->vm_ops->open(vma);
vma               204 drivers/media/common/videobuf2/videobuf2-dma-contig.c 		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
vma               353 drivers/media/common/videobuf2/videobuf2-dma-contig.c 	struct vm_area_struct *vma)
vma               355 drivers/media/common/videobuf2/videobuf2-dma-contig.c 	return vb2_dc_mmap(dbuf->priv, vma);
vma               327 drivers/media/common/videobuf2/videobuf2-dma-sg.c static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
vma               337 drivers/media/common/videobuf2/videobuf2-dma-sg.c 	err = vm_map_pages(vma, buf->pages, buf->num_pages);
vma               346 drivers/media/common/videobuf2/videobuf2-dma-sg.c 	vma->vm_private_data	= &buf->handler;
vma               347 drivers/media/common/videobuf2/videobuf2-dma-sg.c 	vma->vm_ops		= &vb2_common_vm_ops;
vma               349 drivers/media/common/videobuf2/videobuf2-dma-sg.c 	vma->vm_ops->open(vma);
vma               488 drivers/media/common/videobuf2/videobuf2-dma-sg.c 	struct vm_area_struct *vma)
vma               490 drivers/media/common/videobuf2/videobuf2-dma-sg.c 	return vb2_dma_sg_mmap(dbuf->priv, vma);
vma                89 drivers/media/common/videobuf2/videobuf2-memops.c static void vb2_common_vm_open(struct vm_area_struct *vma)
vma                91 drivers/media/common/videobuf2/videobuf2-memops.c 	struct vb2_vmarea_handler *h = vma->vm_private_data;
vma                94 drivers/media/common/videobuf2/videobuf2-memops.c 	       __func__, h, refcount_read(h->refcount), vma->vm_start,
vma                95 drivers/media/common/videobuf2/videobuf2-memops.c 	       vma->vm_end);
vma               107 drivers/media/common/videobuf2/videobuf2-memops.c static void vb2_common_vm_close(struct vm_area_struct *vma)
vma               109 drivers/media/common/videobuf2/videobuf2-memops.c 	struct vb2_vmarea_handler *h = vma->vm_private_data;
vma               112 drivers/media/common/videobuf2/videobuf2-memops.c 	       __func__, h, refcount_read(h->refcount), vma->vm_start,
vma               113 drivers/media/common/videobuf2/videobuf2-memops.c 	       vma->vm_end);
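
videobuf2's common vm_ops above keep a buffer alive across fork() and partial munmap() by refcounting in .open/.close. A sketch of the handler structure and the pair, mirroring vb2_vmarea_handler only in spirit (names are illustrative):

#include <linux/mm.h>
#include <linux/refcount.h>

struct my_vmarea_handler {
	refcount_t *refcount;
	void (*put)(void *arg);	/* releases the buffer */
	void *arg;
};

static void my_vm_open(struct vm_area_struct *vma)
{
	struct my_vmarea_handler *h = vma->vm_private_data;

	refcount_inc(h->refcount);	/* fork()/split duplicated the VMA */
}

static void my_vm_close(struct vm_area_struct *vma)
{
	struct my_vmarea_handler *h = vma->vm_private_data;

	if (refcount_dec_and_test(h->refcount))
		h->put(h->arg);		/* last mapping gone */
}

static const struct vm_operations_struct my_refcount_vm_ops = {
	.open = my_vm_open,
	.close = my_vm_close,
};
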
vma              1026 drivers/media/common/videobuf2/videobuf2-v4l2.c int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
vma              1030 drivers/media/common/videobuf2/videobuf2-v4l2.c 	return vb2_mmap(vdev->queue, vma);
vma               169 drivers/media/common/videobuf2/videobuf2-vmalloc.c static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
vma               179 drivers/media/common/videobuf2/videobuf2-vmalloc.c 	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
vma               188 drivers/media/common/videobuf2/videobuf2-vmalloc.c 	vma->vm_flags		|= VM_DONTEXPAND;
vma               193 drivers/media/common/videobuf2/videobuf2-vmalloc.c 	vma->vm_private_data	= &buf->handler;
vma               194 drivers/media/common/videobuf2/videobuf2-vmalloc.c 	vma->vm_ops		= &vb2_common_vm_ops;
vma               196 drivers/media/common/videobuf2/videobuf2-vmalloc.c 	vma->vm_ops->open(vma);
vma               337 drivers/media/common/videobuf2/videobuf2-vmalloc.c 	struct vm_area_struct *vma)
vma               339 drivers/media/common/videobuf2/videobuf2-vmalloc.c 	return vb2_vmalloc_mmap(dbuf->priv, vma);
vma              1220 drivers/media/dvb-core/dmxdev.c static int dvb_demux_mmap(struct file *file, struct vm_area_struct *vma)
vma              1236 drivers/media/dvb-core/dmxdev.c 	ret = dvb_vb2_mmap(&dmxdevfilter->vb2_ctx, vma);
vma              1370 drivers/media/dvb-core/dmxdev.c static int dvb_dvr_mmap(struct file *file, struct vm_area_struct *vma)
vma              1385 drivers/media/dvb-core/dmxdev.c 	ret = dvb_vb2_mmap(&dmxdev->dvr_vb2_ctx, vma);
vma               422 drivers/media/dvb-core/dvb_vb2.c int dvb_vb2_mmap(struct dvb_vb2_ctx *ctx, struct vm_area_struct *vma)
vma               426 drivers/media/dvb-core/dvb_vb2.c 	ret = vb2_mmap(&ctx->vb_q, vma);
vma              3076 drivers/media/pci/bt8xx/bttv-driver.c bttv_mmap(struct file *file, struct vm_area_struct *vma)
vma              3082 drivers/media/pci/bt8xx/bttv-driver.c 		vma->vm_start, vma->vm_end - vma->vm_start);
vma              3083 drivers/media/pci/bt8xx/bttv-driver.c 	return videobuf_mmap_mapper(bttv_queue(fh),vma);
vma               646 drivers/media/pci/cx18/cx18-fileops.c int cx18_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
vma               672 drivers/media/pci/cx18/cx18-fileops.c 		return videobuf_mmap_mapper(&s->vbuf_q, vma);
vma                22 drivers/media/pci/cx18/cx18-fileops.h int cx18_v4l2_mmap(struct file *file, struct vm_area_struct *vma);
vma              1421 drivers/media/pci/meye/meye.c static void meye_vm_open(struct vm_area_struct *vma)
vma              1423 drivers/media/pci/meye/meye.c 	long idx = (long)vma->vm_private_data;
vma              1427 drivers/media/pci/meye/meye.c static void meye_vm_close(struct vm_area_struct *vma)
vma              1429 drivers/media/pci/meye/meye.c 	long idx = (long)vma->vm_private_data;
vma              1438 drivers/media/pci/meye/meye.c static int meye_mmap(struct file *file, struct vm_area_struct *vma)
vma              1440 drivers/media/pci/meye/meye.c 	unsigned long start = vma->vm_start;
vma              1441 drivers/media/pci/meye/meye.c 	unsigned long size = vma->vm_end - vma->vm_start;
vma              1442 drivers/media/pci/meye/meye.c 	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
vma              1467 drivers/media/pci/meye/meye.c 		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) {
vma              1479 drivers/media/pci/meye/meye.c 	vma->vm_ops = &meye_vm_ops;
vma              1480 drivers/media/pci/meye/meye.c 	vma->vm_flags &= ~VM_IO;	/* not I/O memory */
vma              1481 drivers/media/pci/meye/meye.c 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma              1482 drivers/media/pci/meye/meye.c 	vma->vm_private_data = (void *) (offset / gbufsize);
vma              1483 drivers/media/pci/meye/meye.c 	meye_vm_open(vma);
vma               680 drivers/media/platform/davinci/vpfe_capture.c static int vpfe_mmap(struct file *file, struct vm_area_struct *vma)
vma               687 drivers/media/platform/davinci/vpfe_capture.c 	return videobuf_mmap_mapper(&vpfe_dev->buffer_queue, vma);
vma               715 drivers/media/platform/exynos-gsc/gsc-m2m.c static int gsc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
vma               724 drivers/media/platform/exynos-gsc/gsc-m2m.c 	ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
vma              1309 drivers/media/platform/fsl-viu.c static int viu_mmap(struct file *file, struct vm_area_struct *vma)
vma              1315 drivers/media/platform/fsl-viu.c 	dprintk(1, "mmap called, vma=%p\n", vma);
vma              1319 drivers/media/platform/fsl-viu.c 	ret = videobuf_mmap_mapper(&fh->vb_vidq, vma);
vma              1323 drivers/media/platform/fsl-viu.c 		(unsigned long)vma->vm_start,
vma              1324 drivers/media/platform/fsl-viu.c 		(unsigned long)vma->vm_end-(unsigned long)vma->vm_start,
vma              1392 drivers/media/platform/omap3isp/ispvideo.c static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
vma              1396 drivers/media/platform/omap3isp/ispvideo.c 	return vb2_mmap(&vfh->queue, vma);
vma               609 drivers/media/platform/s3c-camif/camif-capture.c static int s3c_camif_mmap(struct file *file, struct vm_area_struct *vma)
vma               617 drivers/media/platform/s3c-camif/camif-capture.c 		ret = vb2_mmap(&vp->vb_queue, vma);
vma              1043 drivers/media/platform/s5p-mfc/s5p_mfc.c static int s5p_mfc_mmap(struct file *file, struct vm_area_struct *vma)
vma              1046 drivers/media/platform/s5p-mfc/s5p_mfc.c 	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
vma              1051 drivers/media/platform/s5p-mfc/s5p_mfc.c 		ret = vb2_mmap(&ctx->vq_src, vma);
vma              1054 drivers/media/platform/s5p-mfc/s5p_mfc.c 		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
vma              1055 drivers/media/platform/s5p-mfc/s5p_mfc.c 		ret = vb2_mmap(&ctx->vq_dst, vma);
vma              1018 drivers/media/platform/sh_veu.c static int sh_veu_mmap(struct file *file, struct vm_area_struct *vma)
vma              1022 drivers/media/platform/sh_veu.c 	return v4l2_m2m_mmap(file, veu_file->veu_dev->m2m_ctx, vma);
vma               440 drivers/media/usb/cpia2/cpia2.h int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma);
vma              2389 drivers/media/usb/cpia2/cpia2_core.c int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma)
vma              2391 drivers/media/usb/cpia2/cpia2_core.c 	const char *adr = (const char *)vma->vm_start;
vma              2392 drivers/media/usb/cpia2/cpia2_core.c 	unsigned long size = vma->vm_end-vma->vm_start;
vma              2393 drivers/media/usb/cpia2/cpia2_core.c 	unsigned long start_offset = vma->vm_pgoff << PAGE_SHIFT;
vma              2410 drivers/media/usb/cpia2/cpia2_core.c 		if (remap_pfn_range(vma, start, page >> PAGE_SHIFT, PAGE_SIZE, PAGE_SHARED))
vma              1847 drivers/media/usb/cx231xx/cx231xx-417.c static int mpeg_mmap(struct file *file, struct vm_area_struct *vma)
vma              1853 drivers/media/usb/cx231xx/cx231xx-417.c 	return videobuf_mmap_mapper(&fh->vidq, vma);
vma              2042 drivers/media/usb/cx231xx/cx231xx-video.c static int cx231xx_v4l2_mmap(struct file *filp, struct vm_area_struct *vma)
vma              2059 drivers/media/usb/cx231xx/cx231xx-video.c 	rc = videobuf_mmap_mapper(&fh->vb_vidq, vma);
vma              2063 drivers/media/usb/cx231xx/cx231xx-video.c 			 (unsigned long)vma->vm_start,
vma              2064 drivers/media/usb/cx231xx/cx231xx-video.c 			 (unsigned long)vma->vm_end -
vma              2065 drivers/media/usb/cx231xx/cx231xx-video.c 			 (unsigned long)vma->vm_start, rc);
vma               743 drivers/media/usb/stkwebcam/stk-webcam.c static void stk_v4l_vm_open(struct vm_area_struct *vma)
vma               745 drivers/media/usb/stkwebcam/stk-webcam.c 	struct stk_sio_buffer *sbuf = vma->vm_private_data;
vma               748 drivers/media/usb/stkwebcam/stk-webcam.c static void stk_v4l_vm_close(struct vm_area_struct *vma)
vma               750 drivers/media/usb/stkwebcam/stk-webcam.c 	struct stk_sio_buffer *sbuf = vma->vm_private_data;
vma               760 drivers/media/usb/stkwebcam/stk-webcam.c static int v4l_stk_mmap(struct file *fp, struct vm_area_struct *vma)
vma               764 drivers/media/usb/stkwebcam/stk-webcam.c 	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
vma               768 drivers/media/usb/stkwebcam/stk-webcam.c 	if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED))
vma               779 drivers/media/usb/stkwebcam/stk-webcam.c 	ret = remap_vmalloc_range(vma, sbuf->buffer, 0);
vma               782 drivers/media/usb/stkwebcam/stk-webcam.c 	vma->vm_flags |= VM_DONTEXPAND;
vma               783 drivers/media/usb/stkwebcam/stk-webcam.c 	vma->vm_private_data = sbuf;
vma               784 drivers/media/usb/stkwebcam/stk-webcam.c 	vma->vm_ops = &stk_v4l_vm_ops;
vma               786 drivers/media/usb/stkwebcam/stk-webcam.c 	stk_v4l_vm_open(vma);
vma              1502 drivers/media/usb/tm6000/tm6000-video.c static int tm6000_mmap(struct file *file, struct vm_area_struct * vma)
vma              1510 drivers/media/usb/tm6000/tm6000-video.c 	res = videobuf_mmap_mapper(&fh->vb_vidq, vma);
vma               350 drivers/media/usb/tm6000/tm6000.h int tm6000_v4l2_mmap(struct file *filp, struct vm_area_struct *vma);
vma              1002 drivers/media/usb/usbvision/usbvision-video.c static int usbvision_mmap(struct file *file, struct vm_area_struct *vma)
vma              1004 drivers/media/usb/usbvision/usbvision-video.c 	unsigned long size = vma->vm_end - vma->vm_start,
vma              1005 drivers/media/usb/usbvision/usbvision-video.c 		start = vma->vm_start;
vma              1015 drivers/media/usb/usbvision/usbvision-video.c 	if (!(vma->vm_flags & VM_WRITE) ||
vma              1022 drivers/media/usb/usbvision/usbvision-video.c 		    vma->vm_pgoff)
vma              1032 drivers/media/usb/usbvision/usbvision-video.c 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma              1036 drivers/media/usb/usbvision/usbvision-video.c 		if (vm_insert_page(vma, start, vmalloc_to_page(pos))) {
vma              1048 drivers/media/usb/usbvision/usbvision-video.c static int usbvision_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
vma              1055 drivers/media/usb/usbvision/usbvision-video.c 	res = usbvision_mmap(file, vma);
vma               359 drivers/media/usb/uvc/uvc_queue.c int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
vma               361 drivers/media/usb/uvc/uvc_queue.c 	return vb2_mmap(&queue->queue, vma);
vma              1434 drivers/media/usb/uvc/uvc_v4l2.c static int uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
vma              1441 drivers/media/usb/uvc/uvc_v4l2.c 	return uvc_queue_mmap(&stream->queue, vma);
vma               771 drivers/media/usb/uvc/uvcvideo.h 		   struct vm_area_struct *vma);
vma              1250 drivers/media/usb/zr364xx/zr364xx.c static int zr364xx_mmap(struct file *file, struct vm_area_struct *vma)
vma              1259 drivers/media/usb/zr364xx/zr364xx.c 	DBG("mmap called, vma=%p\n", vma);
vma              1261 drivers/media/usb/zr364xx/zr364xx.c 	ret = videobuf_mmap_mapper(&cam->vb_vidq, vma);
vma              1264 drivers/media/usb/zr364xx/zr364xx.c 		(unsigned long)vma->vm_start,
vma              1265 drivers/media/usb/zr364xx/zr364xx.c 		(unsigned long)vma->vm_end - (unsigned long)vma->vm_start, ret);
vma               689 drivers/media/v4l2-core/v4l2-mem2mem.c 			 struct vm_area_struct *vma)
vma               691 drivers/media/v4l2-core/v4l2-mem2mem.c 	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
vma               698 drivers/media/v4l2-core/v4l2-mem2mem.c 		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
vma               701 drivers/media/v4l2-core/v4l2-mem2mem.c 	return vb2_mmap(vq, vma);
vma              1162 drivers/media/v4l2-core/v4l2-mem2mem.c int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
vma              1166 drivers/media/v4l2-core/v4l2-mem2mem.c 	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
vma              1171 drivers/media/v4l2-core/videobuf-core.c int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
vma              1178 drivers/media/v4l2-core/videobuf-core.c 	if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
vma              1188 drivers/media/v4l2-core/videobuf-core.c 				buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
vma              1189 drivers/media/v4l2-core/videobuf-core.c 			rc = CALL(q, mmap_mapper, q, buf, vma);
vma                63 drivers/media/v4l2-core/videobuf-dma-contig.c static void videobuf_vm_open(struct vm_area_struct *vma)
vma                65 drivers/media/v4l2-core/videobuf-dma-contig.c 	struct videobuf_mapping *map = vma->vm_private_data;
vma                68 drivers/media/v4l2-core/videobuf-dma-contig.c 		map, map->count, vma->vm_start, vma->vm_end);
vma                73 drivers/media/v4l2-core/videobuf-dma-contig.c static void videobuf_vm_close(struct vm_area_struct *vma)
vma                75 drivers/media/v4l2-core/videobuf-dma-contig.c 	struct videobuf_mapping *map = vma->vm_private_data;
vma                80 drivers/media/v4l2-core/videobuf-dma-contig.c 		map, map->count, vma->vm_start, vma->vm_end);
vma               162 drivers/media/v4l2-core/videobuf-dma-contig.c 	struct vm_area_struct *vma;
vma               174 drivers/media/v4l2-core/videobuf-dma-contig.c 	vma = find_vma(mm, untagged_baddr);
vma               175 drivers/media/v4l2-core/videobuf-dma-contig.c 	if (!vma)
vma               178 drivers/media/v4l2-core/videobuf-dma-contig.c 	if ((untagged_baddr + mem->size) > vma->vm_end)
vma               186 drivers/media/v4l2-core/videobuf-dma-contig.c 		ret = follow_pfn(vma, user_address, &this_pfn);
vma               276 drivers/media/v4l2-core/videobuf-dma-contig.c 				  struct vm_area_struct *vma)
vma               292 drivers/media/v4l2-core/videobuf-dma-contig.c 	buf->baddr = vma->vm_start;
vma               303 drivers/media/v4l2-core/videobuf-dma-contig.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               311 drivers/media/v4l2-core/videobuf-dma-contig.c 	vma->vm_pgoff = 0;
vma               313 drivers/media/v4l2-core/videobuf-dma-contig.c 	retval = vm_iomap_memory(vma, mem->dma_handle, mem->size);
vma               322 drivers/media/v4l2-core/videobuf-dma-contig.c 	vma->vm_ops = &videobuf_vm_ops;
vma               323 drivers/media/v4l2-core/videobuf-dma-contig.c 	vma->vm_flags |= VM_DONTEXPAND;
vma               324 drivers/media/v4l2-core/videobuf-dma-contig.c 	vma->vm_private_data = map;
vma               327 drivers/media/v4l2-core/videobuf-dma-contig.c 		map, q, vma->vm_start, vma->vm_end,
vma               328 drivers/media/v4l2-core/videobuf-dma-contig.c 		(long int)buf->bsize, vma->vm_pgoff, buf->i);
vma               330 drivers/media/v4l2-core/videobuf-dma-contig.c 	videobuf_vm_open(vma);
vma               387 drivers/media/v4l2-core/videobuf-dma-sg.c static void videobuf_vm_open(struct vm_area_struct *vma)
vma               389 drivers/media/v4l2-core/videobuf-dma-sg.c 	struct videobuf_mapping *map = vma->vm_private_data;
vma               392 drivers/media/v4l2-core/videobuf-dma-sg.c 		map->count, vma->vm_start, vma->vm_end);
vma               397 drivers/media/v4l2-core/videobuf-dma-sg.c static void videobuf_vm_close(struct vm_area_struct *vma)
vma               399 drivers/media/v4l2-core/videobuf-dma-sg.c 	struct videobuf_mapping *map = vma->vm_private_data;
vma               405 drivers/media/v4l2-core/videobuf-dma-sg.c 		map->count, vma->vm_start, vma->vm_end);
vma               440 drivers/media/v4l2-core/videobuf-dma-sg.c 	struct vm_area_struct *vma = vmf->vma;
vma               444 drivers/media/v4l2-core/videobuf-dma-sg.c 		vmf->address, vma->vm_start, vma->vm_end);
vma               592 drivers/media/v4l2-core/videobuf-dma-sg.c 				  struct vm_area_struct *vma)
vma               615 drivers/media/v4l2-core/videobuf-dma-sg.c 				(vma->vm_pgoff << PAGE_SHIFT));
vma               632 drivers/media/v4l2-core/videobuf-dma-sg.c 		q->bufs[i]->baddr = vma->vm_start + size;
vma               638 drivers/media/v4l2-core/videobuf-dma-sg.c 	vma->vm_ops   = &videobuf_vm_ops;
vma               639 drivers/media/v4l2-core/videobuf-dma-sg.c 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma               640 drivers/media/v4l2-core/videobuf-dma-sg.c 	vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
vma               641 drivers/media/v4l2-core/videobuf-dma-sg.c 	vma->vm_private_data = map;
vma               643 drivers/media/v4l2-core/videobuf-dma-sg.c 		map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last);
vma                51 drivers/media/v4l2-core/videobuf-vmalloc.c static void videobuf_vm_open(struct vm_area_struct *vma)
vma                53 drivers/media/v4l2-core/videobuf-vmalloc.c 	struct videobuf_mapping *map = vma->vm_private_data;
vma                56 drivers/media/v4l2-core/videobuf-vmalloc.c 		map->count, vma->vm_start, vma->vm_end);
vma                61 drivers/media/v4l2-core/videobuf-vmalloc.c static void videobuf_vm_close(struct vm_area_struct *vma)
vma                63 drivers/media/v4l2-core/videobuf-vmalloc.c 	struct videobuf_mapping *map = vma->vm_private_data;
vma                68 drivers/media/v4l2-core/videobuf-vmalloc.c 		map->count, vma->vm_start, vma->vm_end);
vma               211 drivers/media/v4l2-core/videobuf-vmalloc.c 				  struct vm_area_struct *vma)
vma               227 drivers/media/v4l2-core/videobuf-vmalloc.c 	buf->baddr = vma->vm_start;
vma               233 drivers/media/v4l2-core/videobuf-vmalloc.c 	pages = PAGE_ALIGN(vma->vm_end - vma->vm_start);
vma               242 drivers/media/v4l2-core/videobuf-vmalloc.c 	retval = remap_vmalloc_range(vma, mem->vaddr, 0);
vma               249 drivers/media/v4l2-core/videobuf-vmalloc.c 	vma->vm_ops          = &videobuf_vm_ops;
vma               250 drivers/media/v4l2-core/videobuf-vmalloc.c 	vma->vm_flags       |= VM_DONTEXPAND | VM_DONTDUMP;
vma               251 drivers/media/v4l2-core/videobuf-vmalloc.c 	vma->vm_private_data = map;
vma               254 drivers/media/v4l2-core/videobuf-vmalloc.c 		map, q, vma->vm_start, vma->vm_end,
vma               256 drivers/media/v4l2-core/videobuf-vmalloc.c 		vma->vm_pgoff, buf->i);
vma               258 drivers/media/v4l2-core/videobuf-vmalloc.c 	videobuf_vm_open(vma);
vma               128 drivers/misc/cxl/context.c 	struct vm_area_struct *vma = vmf->vma;
vma               129 drivers/misc/cxl/context.c 	struct cxl_context *ctx = vma->vm_file->private_data;
vma               162 drivers/misc/cxl/context.c 			vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
vma               168 drivers/misc/cxl/context.c 	ret = vmf_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
vma               182 drivers/misc/cxl/context.c int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
vma               184 drivers/misc/cxl/context.c 	u64 start = vma->vm_pgoff << PAGE_SHIFT;
vma               185 drivers/misc/cxl/context.c 	u64 len = vma->vm_end - vma->vm_start;
vma               223 drivers/misc/cxl/context.c 	vma->vm_flags |= VM_IO | VM_PFNMAP;
vma               224 drivers/misc/cxl/context.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               225 drivers/misc/cxl/context.c 	vma->vm_ops = &cxl_mmap_vmops;
vma               983 drivers/misc/cxl/cxl.h int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma);
vma               207 drivers/misc/cxl/cxllib.c 	struct vm_area_struct *vma = NULL;
vma               212 drivers/misc/cxl/cxllib.c 	vma = find_vma(mm, addr);
vma               213 drivers/misc/cxl/cxllib.c 	if (!vma) {
vma               217 drivers/misc/cxl/cxllib.c 	*page_size = vma_kernel_pagesize(vma);
vma               218 drivers/misc/cxl/cxllib.c 	*vma_start = vma->vm_start;
vma               219 drivers/misc/cxl/cxllib.c 	*vma_end = vma->vm_end;
vma               313 drivers/misc/cxl/fault.c 	struct vm_area_struct *vma;
vma               325 drivers/misc/cxl/fault.c 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma               326 drivers/misc/cxl/fault.c 		for (ea = vma->vm_start; ea < vma->vm_end;
vma               573 drivers/misc/fastrpc.c 			struct vm_area_struct *vma)
vma               576 drivers/misc/fastrpc.c 	size_t size = vma->vm_end - vma->vm_start;
vma               578 drivers/misc/fastrpc.c 	return dma_mmap_coherent(buf->dev, vma, buf->virt,
vma               786 drivers/misc/fastrpc.c 			struct vm_area_struct *vma = NULL;
vma               791 drivers/misc/fastrpc.c 			vma = find_vma(current->mm, ctx->args[i].ptr);
vma               792 drivers/misc/fastrpc.c 			if (vma)
vma               794 drivers/misc/fastrpc.c 						 vma->vm_start;
vma               376 drivers/misc/genwqe/card_dev.c static void genwqe_vma_open(struct vm_area_struct *vma)
vma               386 drivers/misc/genwqe/card_dev.c static void genwqe_vma_close(struct vm_area_struct *vma)
vma               388 drivers/misc/genwqe/card_dev.c 	unsigned long vsize = vma->vm_end - vma->vm_start;
vma               389 drivers/misc/genwqe/card_dev.c 	struct inode *inode = file_inode(vma->vm_file);
vma               395 drivers/misc/genwqe/card_dev.c 	struct genwqe_file *cfile = vma->vm_private_data;
vma               397 drivers/misc/genwqe/card_dev.c 	dma_map = __genwqe_search_mapping(cfile, vma->vm_start, vsize,
vma               402 drivers/misc/genwqe/card_dev.c 			__func__, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
vma               429 drivers/misc/genwqe/card_dev.c static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
vma               432 drivers/misc/genwqe/card_dev.c 	unsigned long pfn, vsize = vma->vm_end - vma->vm_start;
vma               448 drivers/misc/genwqe/card_dev.c 	dma_map->u_vaddr = (void *)vma->vm_start;
vma               462 drivers/misc/genwqe/card_dev.c 	rc = remap_pfn_range(vma,
vma               463 drivers/misc/genwqe/card_dev.c 			     vma->vm_start,
vma               466 drivers/misc/genwqe/card_dev.c 			     vma->vm_page_prot);
vma               472 drivers/misc/genwqe/card_dev.c 	vma->vm_private_data = cfile;
vma               473 drivers/misc/genwqe/card_dev.c 	vma->vm_ops = &genwqe_vma_ops;
vma               243 drivers/misc/habanalabs/command_buffer.c static void cb_vm_close(struct vm_area_struct *vma)
vma               245 drivers/misc/habanalabs/command_buffer.c 	struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data;
vma               248 drivers/misc/habanalabs/command_buffer.c 	new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);
vma               260 drivers/misc/habanalabs/command_buffer.c 	vma->vm_private_data = NULL;
vma               267 drivers/misc/habanalabs/command_buffer.c int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
vma               275 drivers/misc/habanalabs/command_buffer.c 	handle = vma->vm_pgoff;
vma               286 drivers/misc/habanalabs/command_buffer.c 	if ((vma->vm_end - vma->vm_start) != ALIGN(cb->size, PAGE_SIZE)) {
vma               289 drivers/misc/habanalabs/command_buffer.c 			vma->vm_end - vma->vm_start, cb->size);
vma               307 drivers/misc/habanalabs/command_buffer.c 	vma->vm_ops = &cb_vm_ops;
vma               314 drivers/misc/habanalabs/command_buffer.c 	vma->vm_private_data = cb;
vma               319 drivers/misc/habanalabs/command_buffer.c 	rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address,
vma               125 drivers/misc/habanalabs/device.c static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
vma               129 drivers/misc/habanalabs/device.c 	if ((vma->vm_pgoff & HL_MMAP_CB_MASK) == HL_MMAP_CB_MASK) {
vma               130 drivers/misc/habanalabs/device.c 		vma->vm_pgoff ^= HL_MMAP_CB_MASK;
vma               131 drivers/misc/habanalabs/device.c 		return hl_cb_mmap(hpriv, vma);
vma              2697 drivers/misc/habanalabs/goya/goya.c static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
vma              2702 drivers/misc/habanalabs/goya/goya.c 	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
vma              2705 drivers/misc/habanalabs/goya/goya.c 	rc = remap_pfn_range(vma, vma->vm_start, paddress >> PAGE_SHIFT,
vma              2706 drivers/misc/habanalabs/goya/goya.c 				size, vma->vm_page_prot);
vma               518 drivers/misc/habanalabs/habanalabs.h 	int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
vma              1493 drivers/misc/habanalabs/habanalabs.h int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
vma               195 drivers/misc/mic/scif/scif_epd.h int scif_mmap(struct vm_area_struct *vma, scif_epd_t epd);
vma                28 drivers/misc/mic/scif/scif_fd.c static int scif_fdmmap(struct file *f, struct vm_area_struct *vma)
vma                32 drivers/misc/mic/scif/scif_fd.c 	return scif_mmap(vma, priv);
vma                18 drivers/misc/mic/scif/scif_mmap.c 	struct vm_area_struct *vma;
vma                70 drivers/misc/mic/scif/scif_mmap.c 	struct vm_area_struct *vma;
vma                76 drivers/misc/mic/scif/scif_mmap.c 		vma = info->vma;
vma                77 drivers/misc/mic/scif/scif_mmap.c 		size = vma->vm_end - vma->vm_start;
vma                78 drivers/misc/mic/scif/scif_mmap.c 		zap_vma_ptes(vma, vma->vm_start, size);
vma                81 drivers/misc/mic/scif/scif_mmap.c 			__func__, ep, info->vma, size);
vma               160 drivers/misc/mic/scif/scif_mmap.c static int scif_insert_vma(struct scif_endpt *ep, struct vm_area_struct *vma)
vma               170 drivers/misc/mic/scif/scif_mmap.c 	info->vma = vma;
vma               179 drivers/misc/mic/scif/scif_mmap.c static void scif_delete_vma(struct scif_endpt *ep, struct vm_area_struct *vma)
vma               187 drivers/misc/mic/scif/scif_mmap.c 		if (info->vma == vma) {
vma               373 drivers/misc/mic/scif/scif_mmap.c 			      int nr_pages, struct vm_area_struct *vma)
vma               398 drivers/misc/mic/scif/scif_mmap.c 			err = remap_pfn_range(vma,
vma               399 drivers/misc/mic/scif/scif_mmap.c 					      vma->vm_start +
vma               403 drivers/misc/mic/scif/scif_mmap.c 					      vma->vm_page_prot);
vma               524 drivers/misc/mic/scif/scif_mmap.c static void scif_vma_open(struct vm_area_struct *vma)
vma               526 drivers/misc/mic/scif/scif_mmap.c 	struct vma_pvt *vmapvt = vma->vm_private_data;
vma               530 drivers/misc/mic/scif/scif_mmap.c 		vma->vm_start, vma->vm_end);
vma               531 drivers/misc/mic/scif/scif_mmap.c 	scif_insert_vma(vmapvt->ep, vma);
vma               542 drivers/misc/mic/scif/scif_mmap.c static void scif_munmap(struct vm_area_struct *vma)
vma               545 drivers/misc/mic/scif/scif_mmap.c 	struct vma_pvt *vmapvt = vma->vm_private_data;
vma               546 drivers/misc/mic/scif/scif_mmap.c 	int nr_pages = vma_pages(vma);
vma               555 drivers/misc/mic/scif/scif_mmap.c 		vma->vm_start, vma->vm_end);
vma               558 drivers/misc/mic/scif/scif_mmap.c 		(vma->vm_pgoff) << PAGE_SHIFT;
vma               564 drivers/misc/mic/scif/scif_mmap.c 	req.nr_bytes = vma->vm_end - vma->vm_start;
vma               565 drivers/misc/mic/scif/scif_mmap.c 	req.prot = vma->vm_flags & (VM_READ | VM_WRITE);
vma               583 drivers/misc/mic/scif/scif_mmap.c 	vma->vm_ops = NULL;
vma               584 drivers/misc/mic/scif/scif_mmap.c 	vma->vm_private_data = NULL;
vma               586 drivers/misc/mic/scif/scif_mmap.c 	scif_delete_vma(ep, vma);
vma               602 drivers/misc/mic/scif/scif_mmap.c int scif_mmap(struct vm_area_struct *vma, scif_epd_t epd)
vma               607 drivers/misc/mic/scif/scif_mmap.c 	s64 start_offset = vma->vm_pgoff << PAGE_SHIFT;
vma               608 drivers/misc/mic/scif/scif_mmap.c 	int nr_pages = vma_pages(vma);
vma               621 drivers/misc/mic/scif/scif_mmap.c 	err = scif_insert_vma(ep, vma);
vma               627 drivers/misc/mic/scif/scif_mmap.c 		scif_delete_vma(ep, vma);
vma               636 drivers/misc/mic/scif/scif_mmap.c 	req.nr_bytes = vma->vm_end - vma->vm_start;
vma               637 drivers/misc/mic/scif/scif_mmap.c 	req.prot = vma->vm_flags & (VM_READ | VM_WRITE);
vma               652 drivers/misc/mic/scif/scif_mmap.c 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma               666 drivers/misc/mic/scif/scif_mmap.c 	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
vma               669 drivers/misc/mic/scif/scif_mmap.c 		vma->vm_flags |= VM_IO | VM_PFNMAP;
vma               672 drivers/misc/mic/scif/scif_mmap.c 	err = scif_rma_list_mmap(window, start_offset, nr_pages, vma);
vma               679 drivers/misc/mic/scif/scif_mmap.c 	vma->vm_ops = &scif_vm_ops;
vma               680 drivers/misc/mic/scif/scif_mmap.c 	vma->vm_private_data = vmapvt;
vma               687 drivers/misc/mic/scif/scif_mmap.c 		scif_delete_vma(ep, vma);
vma              1075 drivers/misc/mic/vop/vop_vringh.c static int vop_mmap(struct file *f, struct vm_area_struct *vma)
vma              1078 drivers/misc/mic/vop/vop_vringh.c 	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
vma              1079 drivers/misc/mic/vop/vop_vringh.c 	unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size;
vma              1085 drivers/misc/mic/vop/vop_vringh.c 	if (vma->vm_flags & VM_WRITE) {
vma              1095 drivers/misc/mic/vop/vop_vringh.c 		err = remap_pfn_range(vma, vma->vm_start + offset,
vma              1097 drivers/misc/mic/vop/vop_vringh.c 				      vma->vm_page_prot);
vma                96 drivers/misc/ocxl/context.c static vm_fault_t map_afu_irq(struct vm_area_struct *vma, unsigned long address,
vma               106 drivers/misc/ocxl/context.c 	return vmf_insert_pfn(vma, address, trigger_addr >> PAGE_SHIFT);
vma               109 drivers/misc/ocxl/context.c static vm_fault_t map_pp_mmio(struct vm_area_struct *vma, unsigned long address,
vma               132 drivers/misc/ocxl/context.c 	ret = vmf_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT);
vma               139 drivers/misc/ocxl/context.c 	struct vm_area_struct *vma = vmf->vma;
vma               140 drivers/misc/ocxl/context.c 	struct ocxl_context *ctx = vma->vm_file->private_data;
vma               149 drivers/misc/ocxl/context.c 		ret = map_pp_mmio(vma, vmf->address, offset, ctx);
vma               151 drivers/misc/ocxl/context.c 		ret = map_afu_irq(vma, vmf->address, offset, ctx);
vma               160 drivers/misc/ocxl/context.c 			struct vm_area_struct *vma)
vma               162 drivers/misc/ocxl/context.c 	int irq_id = ocxl_irq_offset_to_id(ctx, vma->vm_pgoff << PAGE_SHIFT);
vma               165 drivers/misc/ocxl/context.c 	if (vma_pages(vma) != 1)
vma               178 drivers/misc/ocxl/context.c 	if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) ||
vma               179 drivers/misc/ocxl/context.c 		!(vma->vm_flags & VM_WRITE))
vma               181 drivers/misc/ocxl/context.c 	vma->vm_flags &= ~(VM_MAYREAD | VM_MAYEXEC);
vma               186 drivers/misc/ocxl/context.c 			struct vm_area_struct *vma)
vma               188 drivers/misc/ocxl/context.c 	if ((vma_pages(vma) + vma->vm_pgoff) >
vma               194 drivers/misc/ocxl/context.c int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma)
vma               198 drivers/misc/ocxl/context.c 	if ((vma->vm_pgoff << PAGE_SHIFT) < ctx->afu->irq_base_offset)
vma               199 drivers/misc/ocxl/context.c 		rc = check_mmap_mmio(ctx, vma);
vma               201 drivers/misc/ocxl/context.c 		rc = check_mmap_afu_irq(ctx, vma);
vma               205 drivers/misc/ocxl/context.c 	vma->vm_flags |= VM_IO | VM_PFNMAP;
vma               206 drivers/misc/ocxl/context.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               207 drivers/misc/ocxl/context.c 	vma->vm_ops = &ocxl_vmops;
vma               293 drivers/misc/ocxl/file.c static int afu_mmap(struct file *file, struct vm_area_struct *vma)
vma               298 drivers/misc/ocxl/file.c 	return ocxl_context_mmap(ctx, vma);
vma               134 drivers/misc/ocxl/ocxl_internal.h 			struct vm_area_struct *vma);
vma                76 drivers/misc/ocxl/sysfs.c 	struct vm_area_struct *vma = vmf->vma;
vma                77 drivers/misc/ocxl/sysfs.c 	struct ocxl_afu *afu = vma->vm_private_data;
vma                85 drivers/misc/ocxl/sysfs.c 	return vmf_insert_pfn(vma, vmf->address, offset);
vma                94 drivers/misc/ocxl/sysfs.c 			struct vm_area_struct *vma)
vma                98 drivers/misc/ocxl/sysfs.c 	if ((vma_pages(vma) + vma->vm_pgoff) >
vma               102 drivers/misc/ocxl/sysfs.c 	vma->vm_flags |= VM_IO | VM_PFNMAP;
vma               103 drivers/misc/ocxl/sysfs.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               104 drivers/misc/ocxl/sysfs.c 	vma->vm_ops = &global_mmio_vmops;
vma               105 drivers/misc/ocxl/sysfs.c 	vma->vm_private_data = afu;
vma                50 drivers/misc/sgi-gru/grufault.c 	struct vm_area_struct *vma;
vma                52 drivers/misc/sgi-gru/grufault.c 	vma = find_vma(current->mm, vaddr);
vma                53 drivers/misc/sgi-gru/grufault.c 	if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops)
vma                54 drivers/misc/sgi-gru/grufault.c 		return vma;
vma                69 drivers/misc/sgi-gru/grufault.c 	struct vm_area_struct *vma;
vma                73 drivers/misc/sgi-gru/grufault.c 	vma = gru_find_vma(vaddr);
vma                74 drivers/misc/sgi-gru/grufault.c 	if (vma)
vma                75 drivers/misc/sgi-gru/grufault.c 		gts = gru_find_thread_state(vma, TSID(vaddr, vma));
vma                86 drivers/misc/sgi-gru/grufault.c 	struct vm_area_struct *vma;
vma                90 drivers/misc/sgi-gru/grufault.c 	vma = gru_find_vma(vaddr);
vma                91 drivers/misc/sgi-gru/grufault.c 	if (!vma)
vma                94 drivers/misc/sgi-gru/grufault.c 	gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
vma               177 drivers/misc/sgi-gru/grufault.c static int non_atomic_pte_lookup(struct vm_area_struct *vma,
vma               184 drivers/misc/sgi-gru/grufault.c 	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
vma               205 drivers/misc/sgi-gru/grufault.c static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
vma               214 drivers/misc/sgi-gru/grufault.c 	pgdp = pgd_offset(vma->vm_mm, vaddr);
vma               242 drivers/misc/sgi-gru/grufault.c 	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
vma               256 drivers/misc/sgi-gru/grufault.c 	struct vm_area_struct *vma;
vma               260 drivers/misc/sgi-gru/grufault.c 	vma = find_vma(mm, vaddr);
vma               261 drivers/misc/sgi-gru/grufault.c 	if (!vma)
vma               269 drivers/misc/sgi-gru/grufault.c 	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
vma               273 drivers/misc/sgi-gru/grufault.c 		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
vma                60 drivers/misc/sgi-gru/grufile.c static void gru_vma_close(struct vm_area_struct *vma)
vma                66 drivers/misc/sgi-gru/grufile.c 	if (!vma->vm_private_data)
vma                69 drivers/misc/sgi-gru/grufile.c 	vdata = vma->vm_private_data;
vma                70 drivers/misc/sgi-gru/grufile.c 	vma->vm_private_data = NULL;
vma                71 drivers/misc/sgi-gru/grufile.c 	gru_dbg(grudev, "vma %p, file %p, vdata %p\n", vma, vma->vm_file,
vma                94 drivers/misc/sgi-gru/grufile.c static int gru_file_mmap(struct file *file, struct vm_area_struct *vma)
vma                96 drivers/misc/sgi-gru/grufile.c 	if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) != (VM_SHARED | VM_WRITE))
vma                99 drivers/misc/sgi-gru/grufile.c 	if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) ||
vma               100 drivers/misc/sgi-gru/grufile.c 				vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
vma               103 drivers/misc/sgi-gru/grufile.c 	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_LOCKED |
vma               105 drivers/misc/sgi-gru/grufile.c 	vma->vm_page_prot = PAGE_SHARED;
vma               106 drivers/misc/sgi-gru/grufile.c 	vma->vm_ops = &gru_vm_ops;
vma               108 drivers/misc/sgi-gru/grufile.c 	vma->vm_private_data = gru_alloc_vma_data(vma, 0);
vma               109 drivers/misc/sgi-gru/grufile.c 	if (!vma->vm_private_data)
vma               113 drivers/misc/sgi-gru/grufile.c 		file, vma->vm_start, vma, vma->vm_private_data);
vma               123 drivers/misc/sgi-gru/grufile.c 	struct vm_area_struct *vma;
vma               139 drivers/misc/sgi-gru/grufile.c 	vma = gru_find_vma(req.gseg);
vma               140 drivers/misc/sgi-gru/grufile.c 	if (vma) {
vma               141 drivers/misc/sgi-gru/grufile.c 		vdata = vma->vm_private_data;
vma               310 drivers/misc/sgi-gru/grumain.c struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
vma               339 drivers/misc/sgi-gru/grumain.c 	if (vma) {
vma               341 drivers/misc/sgi-gru/grumain.c 		gts->ts_vma = vma;
vma               359 drivers/misc/sgi-gru/grumain.c struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
vma               377 drivers/misc/sgi-gru/grumain.c struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
vma               380 drivers/misc/sgi-gru/grumain.c 	struct gru_vma_data *vdata = vma->vm_private_data;
vma               386 drivers/misc/sgi-gru/grumain.c 	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
vma               394 drivers/misc/sgi-gru/grumain.c struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
vma               397 drivers/misc/sgi-gru/grumain.c 	struct gru_vma_data *vdata = vma->vm_private_data;
vma               400 drivers/misc/sgi-gru/grumain.c 	gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count,
vma               417 drivers/misc/sgi-gru/grumain.c 	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
vma               918 drivers/misc/sgi-gru/grumain.c 	struct vm_area_struct *vma = vmf->vma;
vma               925 drivers/misc/sgi-gru/grumain.c 		vma, vaddr, GSEG_BASE(vaddr));
vma               929 drivers/misc/sgi-gru/grumain.c 	gts = gru_find_thread_state(vma, TSID(vaddr, vma));
vma               953 drivers/misc/sgi-gru/grumain.c 		remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
vma               955 drivers/misc/sgi-gru/grumain.c 				vma->vm_page_prot);
vma               615 drivers/misc/sgi-gru/grutables.h extern struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma,
vma               618 drivers/misc/sgi-gru/grutables.h 				*vma, int tsid);
vma               620 drivers/misc/sgi-gru/grutables.h 				*vma, int tsid);
vma               647 drivers/misc/sgi-gru/grutables.h extern struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
vma              1131 drivers/mtd/mtdchar.c static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
vma              1145 drivers/mtd/mtdchar.c 			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma              1147 drivers/mtd/mtdchar.c 		return vm_iomap_memory(vma, map->phys, map->size);
vma              1151 drivers/mtd/mtdchar.c 	return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
vma               207 drivers/net/ethernet/ethoc.c 	void **vma;
vma               302 drivers/net/ethernet/ethoc.c 	void *vma;
vma               313 drivers/net/ethernet/ethoc.c 	vma = dev->membase;
vma               322 drivers/net/ethernet/ethoc.c 		dev->vma[i] = vma;
vma               323 drivers/net/ethernet/ethoc.c 		vma += ETHOC_BUFSIZ;
vma               335 drivers/net/ethernet/ethoc.c 		dev->vma[dev->num_tx + i] = vma;
vma               336 drivers/net/ethernet/ethoc.c 		vma += ETHOC_BUFSIZ;
vma               449 drivers/net/ethernet/ethoc.c 				void *src = priv->vma[entry];
vma               907 drivers/net/ethernet/ethoc.c 	dest = priv->vma[entry];
vma              1138 drivers/net/ethernet/ethoc.c 	priv->vma = devm_kcalloc(&pdev->dev, num_bd, sizeof(void *),
vma              1140 drivers/net/ethernet/ethoc.c 	if (!priv->vma) {
vma               257 drivers/oprofile/buffer_sync.c 	struct vm_area_struct *vma;
vma               260 drivers/oprofile/buffer_sync.c 	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
vma               262 drivers/oprofile/buffer_sync.c 		if (addr < vma->vm_start || addr >= vma->vm_end)
vma               265 drivers/oprofile/buffer_sync.c 		if (vma->vm_file) {
vma               266 drivers/oprofile/buffer_sync.c 			cookie = fast_get_dcookie(&vma->vm_file->f_path);
vma               267 drivers/oprofile/buffer_sync.c 			*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
vma               268 drivers/oprofile/buffer_sync.c 				vma->vm_start;
vma               277 drivers/oprofile/buffer_sync.c 	if (!vma)
vma               323 drivers/pci/controller/vmd.c static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
vma               327 drivers/pci/controller/vmd.c 	return dma_mmap_attrs(to_vmd_dev(dev), vma, cpu_addr, addr, size,
vma                23 drivers/pci/mmap.c 			struct vm_area_struct *vma,
vma                31 drivers/pci/mmap.c 	vma->vm_pgoff -= start >> PAGE_SHIFT;
vma                32 drivers/pci/mmap.c 	return pci_mmap_resource_range(pdev, bar, vma, mmap_state,
vma                44 drivers/pci/mmap.c 			    struct vm_area_struct *vma,
vma                51 drivers/pci/mmap.c 	if (vma->vm_pgoff + vma_pages(vma) > size)
vma                55 drivers/pci/mmap.c 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma                57 drivers/pci/mmap.c 		vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
vma                60 drivers/pci/mmap.c 		ret = pci_iobar_pfn(pdev, bar, vma);
vma                64 drivers/pci/mmap.c 		vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT);
vma                66 drivers/pci/mmap.c 	vma->vm_ops = &pci_phys_vm_ops;
vma                68 drivers/pci/mmap.c 	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma                69 drivers/pci/mmap.c 				  vma->vm_end - vma->vm_start,
vma                70 drivers/pci/mmap.c 				  vma->vm_page_prot);
vma                82 drivers/pci/mmap.c 			    struct vm_area_struct *vma,
vma                93 drivers/pci/mmap.c 	vma->vm_pgoff += start >> PAGE_SHIFT;
vma                94 drivers/pci/mmap.c 	return pci_mmap_page_range(pdev, bar, vma, mmap_state, write_combine);
vma               882 drivers/pci/pci-sysfs.c 			       struct vm_area_struct *vma)
vma               886 drivers/pci/pci-sysfs.c 	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
vma               902 drivers/pci/pci-sysfs.c 			      struct vm_area_struct *vma)
vma               906 drivers/pci/pci-sysfs.c 	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
vma               988 drivers/pci/pci-sysfs.c int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
vma               996 drivers/pci/pci-sysfs.c 	nr = vma_pages(vma);
vma               997 drivers/pci/pci-sysfs.c 	start = vma->vm_pgoff;
vma              1020 drivers/pci/pci-sysfs.c 			     struct vm_area_struct *vma, int write_combine)
vma              1035 drivers/pci/pci-sysfs.c 	if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS))
vma              1040 drivers/pci/pci-sysfs.c 	return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine);
vma              1045 drivers/pci/pci-sysfs.c 				struct vm_area_struct *vma)
vma              1047 drivers/pci/pci-sysfs.c 	return pci_mmap_resource(kobj, attr, vma, 0);
vma              1052 drivers/pci/pci-sysfs.c 				struct vm_area_struct *vma)
vma              1054 drivers/pci/pci-sysfs.c 	return pci_mmap_resource(kobj, attr, vma, 1);
vma               244 drivers/pci/proc.c static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
vma               263 drivers/pci/proc.c 		    pci_mmap_fits(dev, i, vma,  PCI_MMAP_PROCFS))
vma               277 drivers/pci/proc.c 	ret = pci_mmap_page_range(dev, i, vma,
vma              2169 drivers/rapidio/devices/rio_mport_cdev.c static void mport_mm_open(struct vm_area_struct *vma)
vma              2171 drivers/rapidio/devices/rio_mport_cdev.c 	struct rio_mport_mapping *map = vma->vm_private_data;
vma              2177 drivers/rapidio/devices/rio_mport_cdev.c static void mport_mm_close(struct vm_area_struct *vma)
vma              2179 drivers/rapidio/devices/rio_mport_cdev.c 	struct rio_mport_mapping *map = vma->vm_private_data;
vma              2192 drivers/rapidio/devices/rio_mport_cdev.c static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
vma              2196 drivers/rapidio/devices/rio_mport_cdev.c 	size_t size = vma->vm_end - vma->vm_start;
vma              2203 drivers/rapidio/devices/rio_mport_cdev.c 		   (unsigned int)size, vma->vm_pgoff);
vma              2206 drivers/rapidio/devices/rio_mport_cdev.c 	baddr = ((dma_addr_t)vma->vm_pgoff << PAGE_SHIFT);
vma              2226 drivers/rapidio/devices/rio_mport_cdev.c 	vma->vm_pgoff = offset >> PAGE_SHIFT;
vma              2227 drivers/rapidio/devices/rio_mport_cdev.c 	rmcd_debug(MMAP, "MMAP adjusted offset = 0x%lx", vma->vm_pgoff);
vma              2230 drivers/rapidio/devices/rio_mport_cdev.c 		ret = dma_mmap_coherent(md->mport->dev.parent, vma,
vma              2233 drivers/rapidio/devices/rio_mport_cdev.c 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma              2234 drivers/rapidio/devices/rio_mport_cdev.c 		ret = vm_iomap_memory(vma, map->phys_addr, map->size);
vma              2241 drivers/rapidio/devices/rio_mport_cdev.c 		vma->vm_private_data = map;
vma              2242 drivers/rapidio/devices/rio_mport_cdev.c 		vma->vm_ops = &vm_ops;
vma              2243 drivers/rapidio/devices/rio_mport_cdev.c 		mport_mm_open(vma);
vma                37 drivers/sbus/char/flash.c flash_mmap(struct file *file, struct vm_area_struct *vma)
vma                47 drivers/sbus/char/flash.c 		if ((vma->vm_flags & VM_READ) &&
vma                48 drivers/sbus/char/flash.c 		    (vma->vm_flags & VM_WRITE)) {
vma                52 drivers/sbus/char/flash.c 		if (vma->vm_flags & VM_READ) {
vma                55 drivers/sbus/char/flash.c 		} else if (vma->vm_flags & VM_WRITE) {
vma                65 drivers/sbus/char/flash.c 	if ((vma->vm_pgoff << PAGE_SHIFT) > size)
vma                67 drivers/sbus/char/flash.c 	addr = vma->vm_pgoff + (addr >> PAGE_SHIFT);
vma                69 drivers/sbus/char/flash.c 	if (vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)) > size)
vma                70 drivers/sbus/char/flash.c 		size = vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT));
vma                72 drivers/sbus/char/flash.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma                74 drivers/sbus/char/flash.c 	if (io_remap_pfn_range(vma, vma->vm_start, addr, size, vma->vm_page_prot))
vma               211 drivers/sbus/char/oradax.c static int dax_devmap(struct file *f, struct vm_area_struct *vma);
vma               372 drivers/sbus/char/oradax.c static int dax_devmap(struct file *f, struct vm_area_struct *vma)
vma               375 drivers/sbus/char/oradax.c 	size_t len = vma->vm_end - vma->vm_start;
vma               377 drivers/sbus/char/oradax.c 	dax_dbg("len=0x%lx, flags=0x%lx", len, vma->vm_flags);
vma               390 drivers/sbus/char/oradax.c 	if (vma->vm_flags & VM_WRITE)
vma               392 drivers/sbus/char/oradax.c 	vma->vm_flags &= ~VM_MAYWRITE;
vma               394 drivers/sbus/char/oradax.c 	if (remap_pfn_range(vma, vma->vm_start, ctx->ca_buf_ra >> PAGE_SHIFT,
vma               395 drivers/sbus/char/oradax.c 			    len, vma->vm_page_prot))
vma               398 drivers/sbus/char/oradax.c 	dax_dbg("mmapped completion area at uva 0x%lx", vma->vm_start);
vma              1127 drivers/scsi/cxlflash/ocxl_hw.c 	struct vm_area_struct *vma = vmf->vma;
vma              1128 drivers/scsi/cxlflash/ocxl_hw.c 	struct ocxlflash_context *ctx = vma->vm_file->private_data;
vma              1148 drivers/scsi/cxlflash/ocxl_hw.c 	return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
vma              1162 drivers/scsi/cxlflash/ocxl_hw.c static int afu_mmap(struct file *file, struct vm_area_struct *vma)
vma              1166 drivers/scsi/cxlflash/ocxl_hw.c 	if ((vma_pages(vma) + vma->vm_pgoff) >
vma              1170 drivers/scsi/cxlflash/ocxl_hw.c 	vma->vm_flags |= VM_IO | VM_PFNMAP;
vma              1171 drivers/scsi/cxlflash/ocxl_hw.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma              1172 drivers/scsi/cxlflash/ocxl_hw.c 	vma->vm_ops = &ocxlflash_vmops;
vma              1355 drivers/scsi/cxlflash/ocxl_hw.c static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma)
vma              1357 drivers/scsi/cxlflash/ocxl_hw.c 	return afu_mmap(file, vma);
vma              1105 drivers/scsi/cxlflash/superpipe.c 	struct vm_area_struct *vma = vmf->vma;
vma              1106 drivers/scsi/cxlflash/superpipe.c 	struct file *file = vma->vm_file;
vma              1133 drivers/scsi/cxlflash/superpipe.c 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma              1148 drivers/scsi/cxlflash/superpipe.c 		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
vma              1178 drivers/scsi/cxlflash/superpipe.c static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
vma              1206 drivers/scsi/cxlflash/superpipe.c 	rc = cfg->ops->fd_mmap(file, vma);
vma              1209 drivers/scsi/cxlflash/superpipe.c 		ctxi->cxl_mmap_vmops = vma->vm_ops;
vma              1210 drivers/scsi/cxlflash/superpipe.c 		vma->vm_ops = &cxlflash_mmap_vmops;
vma              1225 drivers/scsi/sg.c 	struct vm_area_struct *vma = vmf->vma;
vma              1231 drivers/scsi/sg.c 	if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
vma              1240 drivers/scsi/sg.c 	sa = vma->vm_start;
vma              1242 drivers/scsi/sg.c 	for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
vma              1243 drivers/scsi/sg.c 		len = vma->vm_end - sa;
vma              1264 drivers/scsi/sg.c sg_mmap(struct file *filp, struct vm_area_struct *vma)
vma              1272 drivers/scsi/sg.c 	if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
vma              1274 drivers/scsi/sg.c 	req_sz = vma->vm_end - vma->vm_start;
vma              1277 drivers/scsi/sg.c 				      (void *) vma->vm_start, (int) req_sz));
vma              1278 drivers/scsi/sg.c 	if (vma->vm_pgoff)
vma              1287 drivers/scsi/sg.c 	sa = vma->vm_start;
vma              1289 drivers/scsi/sg.c 	for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
vma              1290 drivers/scsi/sg.c 		len = vma->vm_end - sa;
vma              1296 drivers/scsi/sg.c 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma              1297 drivers/scsi/sg.c 	vma->vm_private_data = sfp;
vma              1298 drivers/scsi/sg.c 	vma->vm_ops = &sg_mmap_vm_ops;
vma                43 drivers/soc/aspeed/aspeed-lpc-ctrl.c static int aspeed_lpc_ctrl_mmap(struct file *file, struct vm_area_struct *vma)
vma                46 drivers/soc/aspeed/aspeed-lpc-ctrl.c 	unsigned long vsize = vma->vm_end - vma->vm_start;
vma                47 drivers/soc/aspeed/aspeed-lpc-ctrl.c 	pgprot_t prot = vma->vm_page_prot;
vma                49 drivers/soc/aspeed/aspeed-lpc-ctrl.c 	if (vma->vm_pgoff + vsize > lpc_ctrl->mem_base + lpc_ctrl->mem_size)
vma                55 drivers/soc/aspeed/aspeed-lpc-ctrl.c 	if (remap_pfn_range(vma, vma->vm_start,
vma                56 drivers/soc/aspeed/aspeed-lpc-ctrl.c 		(lpc_ctrl->mem_base >> PAGE_SHIFT) + vma->vm_pgoff,
vma               100 drivers/soc/aspeed/aspeed-p2a-ctrl.c static int aspeed_p2a_mmap(struct file *file, struct vm_area_struct *vma)
vma               110 drivers/soc/aspeed/aspeed-p2a-ctrl.c 	vsize = vma->vm_end - vma->vm_start;
vma               111 drivers/soc/aspeed/aspeed-p2a-ctrl.c 	prot = vma->vm_page_prot;
vma               113 drivers/soc/aspeed/aspeed-p2a-ctrl.c 	if (vma->vm_pgoff + vsize > ctrl->mem_base + ctrl->mem_size)
vma               119 drivers/soc/aspeed/aspeed-p2a-ctrl.c 	if (remap_pfn_range(vma, vma->vm_start,
vma               120 drivers/soc/aspeed/aspeed-p2a-ctrl.c 		(ctrl->mem_base >> PAGE_SHIFT) + vma->vm_pgoff,
vma               132 drivers/soc/qcom/rmtfs_mem.c static int qcom_rmtfs_mem_mmap(struct file *filep, struct vm_area_struct *vma)
vma               136 drivers/soc/qcom/rmtfs_mem.c 	if (vma->vm_end - vma->vm_start > rmtfs_mem->size) {
vma               139 drivers/soc/qcom/rmtfs_mem.c 			vma->vm_end, vma->vm_start,
vma               140 drivers/soc/qcom/rmtfs_mem.c 			(vma->vm_end - vma->vm_start), &rmtfs_mem->size);
vma               144 drivers/soc/qcom/rmtfs_mem.c 	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma               145 drivers/soc/qcom/rmtfs_mem.c 	return remap_pfn_range(vma,
vma               146 drivers/soc/qcom/rmtfs_mem.c 			       vma->vm_start,
vma               148 drivers/soc/qcom/rmtfs_mem.c 			       vma->vm_end - vma->vm_start,
vma               149 drivers/soc/qcom/rmtfs_mem.c 			       vma->vm_page_prot);
vma               354 drivers/staging/android/ashmem.c static int ashmem_vmfile_mmap(struct file *file, struct vm_area_struct *vma)
vma               368 drivers/staging/android/ashmem.c static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
vma               383 drivers/staging/android/ashmem.c 	if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
vma               389 drivers/staging/android/ashmem.c 	if ((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
vma               394 drivers/staging/android/ashmem.c 	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);
vma               404 drivers/staging/android/ashmem.c 		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
vma               431 drivers/staging/android/ashmem.c 	if (vma->vm_flags & VM_SHARED) {
vma               432 drivers/staging/android/ashmem.c 		ret = shmem_zero_setup(vma);
vma               438 drivers/staging/android/ashmem.c 		vma_set_anonymous(vma);
vma               441 drivers/staging/android/ashmem.c 	if (vma->vm_file)
vma               442 drivers/staging/android/ashmem.c 		fput(vma->vm_file);
vma               443 drivers/staging/android/ashmem.c 	vma->vm_file = asma->file;
vma               244 drivers/staging/android/ion/ion.c static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
vma               256 drivers/staging/android/ion/ion.c 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma               260 drivers/staging/android/ion/ion.c 	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
vma                93 drivers/staging/android/ion/ion.h 			struct vm_area_struct *vma);
vma               178 drivers/staging/android/ion/ion.h 		      struct vm_area_struct *vma);
vma                65 drivers/staging/android/ion/ion_heap.c 		      struct vm_area_struct *vma)
vma                68 drivers/staging/android/ion/ion_heap.c 	unsigned long addr = vma->vm_start;
vma                69 drivers/staging/android/ion/ion_heap.c 	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
vma                76 drivers/staging/android/ion/ion_heap.c 		unsigned long remainder = vma->vm_end - addr;
vma                88 drivers/staging/android/ion/ion_heap.c 		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
vma                89 drivers/staging/android/ion/ion_heap.c 				      vma->vm_page_prot);
vma                93 drivers/staging/android/ion/ion_heap.c 		if (addr >= vma->vm_end)
vma              1119 drivers/staging/android/vsoc.c static int vsoc_mmap(struct file *filp, struct vm_area_struct *vma)
vma              1121 drivers/staging/android/vsoc.c 	unsigned long len = vma->vm_end - vma->vm_start;
vma              1131 drivers/staging/android/vsoc.c 	area_off += (vma->vm_pgoff << PAGE_SHIFT);
vma              1132 drivers/staging/android/vsoc.c 	area_len -= (vma->vm_pgoff << PAGE_SHIFT);
vma              1135 drivers/staging/android/vsoc.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma              1137 drivers/staging/android/vsoc.c 	if (io_remap_pfn_range(vma, vma->vm_start, mem_off >> PAGE_SHIFT,
vma              1138 drivers/staging/android/vsoc.c 			       len, vma->vm_page_prot))
vma              2277 drivers/staging/comedi/comedi_fops.c static int comedi_vm_access(struct vm_area_struct *vma, unsigned long addr,
vma              2280 drivers/staging/comedi/comedi_fops.c 	struct comedi_buf_map *bm = vma->vm_private_data;
vma              2282 drivers/staging/comedi/comedi_fops.c 	    addr - vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT);
vma              2286 drivers/staging/comedi/comedi_fops.c 	if (len > vma->vm_end - addr)
vma              2287 drivers/staging/comedi/comedi_fops.c 		len = vma->vm_end - addr;
vma              2297 drivers/staging/comedi/comedi_fops.c static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
vma              2305 drivers/staging/comedi/comedi_fops.c 	unsigned long start = vma->vm_start;
vma              2326 drivers/staging/comedi/comedi_fops.c 	if (vma->vm_flags & VM_WRITE)
vma              2341 drivers/staging/comedi/comedi_fops.c 	if (vma->vm_pgoff != 0) {
vma              2347 drivers/staging/comedi/comedi_fops.c 	size = vma->vm_end - vma->vm_start;
vma              2357 drivers/staging/comedi/comedi_fops.c 	n_pages = vma_pages(vma);
vma              2371 drivers/staging/comedi/comedi_fops.c 		retval = dma_mmap_coherent(bm->dma_hw_dev, vma, buf->virt_addr,
vma              2379 drivers/staging/comedi/comedi_fops.c 			retval = remap_pfn_range(vma, start, pfn, PAGE_SIZE,
vma              2389 drivers/staging/comedi/comedi_fops.c 		vma->vm_ops = &comedi_vm_ops;
vma              2390 drivers/staging/comedi/comedi_fops.c 		vma->vm_private_data = bm;
vma              2392 drivers/staging/comedi/comedi_fops.c 		vma->vm_ops->open(vma);
vma               676 drivers/staging/gasket/gasket_core.c 					struct vm_area_struct *vma,
vma               692 drivers/staging/gasket/gasket_core.c 		(vma->vm_flags & (VM_WRITE | VM_READ | VM_EXEC));
vma               702 drivers/staging/gasket/gasket_core.c 	if ((vma->vm_flags & VM_WRITE) &&
vma               836 drivers/staging/gasket/gasket_core.c 				    const struct vm_area_struct *vma,
vma               844 drivers/staging/gasket/gasket_core.c 	raw_offset = (vma->vm_pgoff << PAGE_SHIFT) +
vma               861 drivers/staging/gasket/gasket_core.c 			   struct vm_area_struct *vma,
vma               872 drivers/staging/gasket/gasket_core.c 	ret = gasket_mm_vma_bar_offset(gasket_dev, vma, &bar_offset);
vma               877 drivers/staging/gasket/gasket_core.c 					 vma->vm_end - vma->vm_start,
vma               887 drivers/staging/gasket/gasket_core.c 	zap_vma_ptes(vma, vma->vm_start + virt_offset,
vma               896 drivers/staging/gasket/gasket_core.c do_map_region(const struct gasket_dev *gasket_dev, struct vm_area_struct *vma,
vma               913 drivers/staging/gasket/gasket_core.c 	ret = gasket_mm_vma_bar_offset(gasket_dev, vma, &bar_offset);
vma               918 drivers/staging/gasket/gasket_core.c 					 vma->vm_end - vma->vm_start,
vma               924 drivers/staging/gasket/gasket_core.c 	virt_base = vma->vm_start + virt_offset;
vma               927 drivers/staging/gasket/gasket_core.c 				     (vma->vm_pgoff << PAGE_SHIFT) +
vma               942 drivers/staging/gasket/gasket_core.c 		ret = io_remap_pfn_range(vma, virt_base + mapped_bytes,
vma               945 drivers/staging/gasket/gasket_core.c 					 vma->vm_page_prot);
vma               959 drivers/staging/gasket/gasket_core.c 	if (gasket_mm_unmap_region(gasket_dev, vma, mappable_region))
vma               970 drivers/staging/gasket/gasket_core.c 				struct vm_area_struct *vma)
vma               974 drivers/staging/gasket/gasket_core.c 	const ulong requested_length = vma->vm_end - vma->vm_start;
vma               985 drivers/staging/gasket/gasket_core.c 	if (!gasket_mmap_has_permissions(gasket_dev, vma, permissions)) {
vma               991 drivers/staging/gasket/gasket_core.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               993 drivers/staging/gasket/gasket_core.c 	ret = remap_pfn_range(vma, vma->vm_start,
vma               995 drivers/staging/gasket/gasket_core.c 			      PAGE_SHIFT, requested_length, vma->vm_page_prot);
vma              1008 drivers/staging/gasket/gasket_core.c 			     vma->vm_start);
vma              1013 drivers/staging/gasket/gasket_core.c static int gasket_mmap(struct file *filp, struct vm_area_struct *vma)
vma              1030 drivers/staging/gasket/gasket_core.c 	if (vma->vm_start & ~PAGE_MASK) {
vma              1033 drivers/staging/gasket/gasket_core.c 			vma->vm_start);
vma              1039 drivers/staging/gasket/gasket_core.c 	raw_offset = (vma->vm_pgoff << PAGE_SHIFT) +
vma              1041 drivers/staging/gasket/gasket_core.c 	vma_size = vma->vm_end - vma->vm_start;
vma              1066 drivers/staging/gasket/gasket_core.c 	vma->vm_private_data = gasket_dev;
vma              1069 drivers/staging/gasket/gasket_core.c 		return gasket_mmap_coherent(gasket_dev, vma);
vma              1079 drivers/staging/gasket/gasket_core.c 	if (!gasket_mmap_has_permissions(gasket_dev, vma, permissions)) {
vma              1093 drivers/staging/gasket/gasket_core.c 		if (!gasket_mmap_has_permissions(gasket_dev, vma,
vma              1117 drivers/staging/gasket/gasket_core.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma              1119 drivers/staging/gasket/gasket_core.c 		map_status = do_map_region(gasket_dev, vma, &map_regions[i]);
vma              1148 drivers/staging/gasket/gasket_core.c 		if (gasket_mm_unmap_region(gasket_dev, vma,
vma               559 drivers/staging/gasket/gasket_core.h 			   struct vm_area_struct *vma,
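
gasket_mmap_has_permissions(), called at several of the gasket_core.c lines above, compares the VMA's access bits against what the target region allows. A minimal sketch of that flag arithmetic only; the real helper is assumed to also special-case root and write-protected BARs, which is not reproduced here:

        #include <linux/mm.h>

        static bool mmap_has_permissions(struct vm_area_struct *vma,
                                         unsigned long allowed)
        {
                unsigned long requested =
                        vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC);

                /* every access mode the caller asked for must be allowed */
                return (requested & allowed) == requested;
        }
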
vma              1227 drivers/staging/gasket/gasket_page_table.c 			 dma_addr_t dma_address, ulong vma)
vma              1246 drivers/staging/gasket/gasket_page_table.c 			(u64)vma + j * PAGE_SIZE;
vma               247 drivers/staging/gasket/gasket_page_table.h 			 dma_addr_t dma_address, ulong vma);
vma                25 drivers/staging/media/ipu3/ipu3-css-pool.h 	struct vm_struct *vma;
vma               127 drivers/staging/media/ipu3/ipu3-dmamap.c 	map->vma = __get_vm_area(size, VM_USERMAP, VMALLOC_START, VMALLOC_END);
vma               128 drivers/staging/media/ipu3/ipu3-dmamap.c 	if (!map->vma)
vma               131 drivers/staging/media/ipu3/ipu3-dmamap.c 	map->vma->pages = pages;
vma               133 drivers/staging/media/ipu3/ipu3-dmamap.c 	if (map_vm_area(map->vma, PAGE_KERNEL, pages))
vma               138 drivers/staging/media/ipu3/ipu3-dmamap.c 	map->vaddr = map->vma->addr;
vma               141 drivers/staging/media/ipu3/ipu3-dmamap.c 		size, &map->daddr, map->vma->addr);
vma               143 drivers/staging/media/ipu3/ipu3-dmamap.c 	return map->vma->addr;
vma               146 drivers/staging/media/ipu3/ipu3-dmamap.c 	vunmap(map->vma->addr);
vma               152 drivers/staging/media/ipu3/ipu3-dmamap.c 	map->vma = NULL;
vma               180 drivers/staging/media/ipu3/ipu3-dmamap.c 	struct vm_struct *area = map->vma;
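
ipu3-dmamap above open-codes a kernel-side mapping with __get_vm_area()/map_vm_area() so it can keep the struct vm_struct (and its ->pages array) reachable; the common one-call form of the same operation is vmap(). A sketch under that assumption, given an already-allocated page array:

        #include <linux/vmalloc.h>

        static void *map_pages_to_kernel(struct page **pages, unsigned int count)
        {
                /* VM_USERMAP also permits a later remap_vmalloc_range() */
                return vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
        }

        static void unmap_pages_from_kernel(void *vaddr)
        {
                vunmap(vaddr);          /* tears down the mapping, not the pages */
        }
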
vma              1185 drivers/staging/media/omap4iss/iss_video.c static int iss_video_mmap(struct file *file, struct vm_area_struct *vma)
vma              1189 drivers/staging/media/omap4iss/iss_video.c 	return vb2_mmap(&vfh->queue, vma);
vma               781 drivers/staging/media/soc_camera/soc_camera.c static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
vma               787 drivers/staging/media/soc_camera/soc_camera.c 	dev_dbg(icd->pdev, "mmap called, vma=%p\n", vma);
vma               794 drivers/staging/media/soc_camera/soc_camera.c 	err = vb2_mmap(&icd->vb2_vidq, vma);
vma               798 drivers/staging/media/soc_camera/soc_camera.c 		(unsigned long)vma->vm_start,
vma               799 drivers/staging/media/soc_camera/soc_camera.c 		(unsigned long)vma->vm_end - (unsigned long)vma->vm_start,
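
The omap4iss and soc_camera hits show the videobuf2 delegation pattern: the driver's fops handler simply forwards to vb2_mmap(), which validates vm_pgoff against the queue's buffers and installs vb2's own vm_ops. A sketch, with my_dev and its queue as assumed driver state:

        #include <media/videobuf2-v4l2.h>
        #include <media/v4l2-dev.h>

        struct my_dev {
                struct vb2_queue queue;
        };

        static int my_video_mmap(struct file *file, struct vm_area_struct *vma)
        {
                struct my_dev *dev = video_drvdata(file);

                return vb2_mmap(&dev->queue, vma);
        }
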
vma               424 drivers/staging/vme/devices/vme_user.c static void vme_user_vm_open(struct vm_area_struct *vma)
vma               426 drivers/staging/vme/devices/vme_user.c 	struct vme_user_vma_priv *vma_priv = vma->vm_private_data;
vma               431 drivers/staging/vme/devices/vme_user.c static void vme_user_vm_close(struct vm_area_struct *vma)
vma               433 drivers/staging/vme/devices/vme_user.c 	struct vme_user_vma_priv *vma_priv = vma->vm_private_data;
vma               451 drivers/staging/vme/devices/vme_user.c static int vme_user_master_mmap(unsigned int minor, struct vm_area_struct *vma)
vma               458 drivers/staging/vme/devices/vme_user.c 	err = vme_master_mmap(image[minor].resource, vma);
vma               472 drivers/staging/vme/devices/vme_user.c 	vma->vm_ops = &vme_user_vm_ops;
vma               473 drivers/staging/vme/devices/vme_user.c 	vma->vm_private_data = vma_priv;
vma               482 drivers/staging/vme/devices/vme_user.c static int vme_user_mmap(struct file *file, struct vm_area_struct *vma)
vma               487 drivers/staging/vme/devices/vme_user.c 		return vme_user_master_mmap(minor, vma);
vma              1490 drivers/target/target_core_user.c static int tcmu_find_mem_index(struct vm_area_struct *vma)
vma              1492 drivers/target/target_core_user.c 	struct tcmu_dev *udev = vma->vm_private_data;
vma              1495 drivers/target/target_core_user.c 	if (vma->vm_pgoff < MAX_UIO_MAPS) {
vma              1496 drivers/target/target_core_user.c 		if (info->mem[vma->vm_pgoff].size == 0)
vma              1498 drivers/target/target_core_user.c 		return (int)vma->vm_pgoff;
vma              1528 drivers/target/target_core_user.c 	struct tcmu_dev *udev = vmf->vma->vm_private_data;
vma              1534 drivers/target/target_core_user.c 	int mi = tcmu_find_mem_index(vmf->vma);
vma              1567 drivers/target/target_core_user.c static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
vma              1571 drivers/target/target_core_user.c 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma              1572 drivers/target/target_core_user.c 	vma->vm_ops = &tcmu_vm_ops;
vma              1574 drivers/target/target_core_user.c 	vma->vm_private_data = udev;
vma              1577 drivers/target/target_core_user.c 	if (vma_pages(vma) != (udev->ring_size >> PAGE_SHIFT))
vma               541 drivers/tee/optee/call.c static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
vma               543 drivers/tee/optee/call.c 	while (vma && is_normal_memory(vma->vm_page_prot)) {
vma               544 drivers/tee/optee/call.c 		if (vma->vm_end >= end)
vma               546 drivers/tee/optee/call.c 		vma = vma->vm_next;
vma                79 drivers/tee/tee_shm.c static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
vma                82 drivers/tee/tee_shm.c 	size_t size = vma->vm_end - vma->vm_start;
vma                88 drivers/tee/tee_shm.c 	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
vma                89 drivers/tee/tee_shm.c 			       size, vma->vm_page_prot);
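
__check_mem_type() above walks the VMA list (this era's vm_next chain) to confirm an entire user range is backed by suitable mappings before sharing it with the secure world. A reduced sketch that substitutes a VM_IO/VM_PFNMAP test for OP-TEE's pgprot-based is_normal_memory() check; the caller is assumed to hold mmap_sem:

        #include <linux/mm.h>

        static int range_is_normal_memory(struct mm_struct *mm,
                                          unsigned long start, unsigned long end)
        {
                struct vm_area_struct *vma;

                for (vma = find_vma(mm, start); vma; vma = vma->vm_next) {
                        if (vma->vm_flags & (VM_IO | VM_PFNMAP))
                                return -EINVAL; /* not ordinary RAM */
                        if (vma->vm_end >= end)
                                return 0;       /* range fully covered */
                }
                return -EINVAL;                 /* ran past the last VMA */
        }
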
vma               655 drivers/uio/uio.c static int uio_find_mem_index(struct vm_area_struct *vma)
vma               657 drivers/uio/uio.c 	struct uio_device *idev = vma->vm_private_data;
vma               659 drivers/uio/uio.c 	if (vma->vm_pgoff < MAX_UIO_MAPS) {
vma               660 drivers/uio/uio.c 		if (idev->info->mem[vma->vm_pgoff].size == 0)
vma               662 drivers/uio/uio.c 		return (int)vma->vm_pgoff;
vma               669 drivers/uio/uio.c 	struct uio_device *idev = vmf->vma->vm_private_data;
vma               682 drivers/uio/uio.c 	mi = uio_find_mem_index(vmf->vma);
vma               712 drivers/uio/uio.c static int uio_mmap_logical(struct vm_area_struct *vma)
vma               714 drivers/uio/uio.c 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma               715 drivers/uio/uio.c 	vma->vm_ops = &uio_logical_vm_ops;
vma               725 drivers/uio/uio.c static int uio_mmap_physical(struct vm_area_struct *vma)
vma               727 drivers/uio/uio.c 	struct uio_device *idev = vma->vm_private_data;
vma               728 drivers/uio/uio.c 	int mi = uio_find_mem_index(vma);
vma               737 drivers/uio/uio.c 	if (vma->vm_end - vma->vm_start > mem->size)
vma               740 drivers/uio/uio.c 	vma->vm_ops = &uio_physical_vm_ops;
vma               742 drivers/uio/uio.c 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               753 drivers/uio/uio.c 	return remap_pfn_range(vma,
vma               754 drivers/uio/uio.c 			       vma->vm_start,
vma               756 drivers/uio/uio.c 			       vma->vm_end - vma->vm_start,
vma               757 drivers/uio/uio.c 			       vma->vm_page_prot);
vma               760 drivers/uio/uio.c static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
vma               768 drivers/uio/uio.c 	if (vma->vm_end < vma->vm_start)
vma               771 drivers/uio/uio.c 	vma->vm_private_data = idev;
vma               779 drivers/uio/uio.c 	mi = uio_find_mem_index(vma);
vma               785 drivers/uio/uio.c 	requested_pages = vma_pages(vma);
vma               794 drivers/uio/uio.c 		ret = idev->info->mmap(idev->info, vma);
vma               801 drivers/uio/uio.c 		ret = uio_mmap_physical(vma);
vma               805 drivers/uio/uio.c 		ret = uio_mmap_logical(vma);
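
uio.c above demonstrates the two classic UIO paths: vm_pgoff selects one of up to MAX_UIO_MAPS memory regions, logical memory is served by a fault handler, and physical memory is remapped up front with non-cached protections. A sketch of the physical path, assuming mem_addr/mem_size describe the already-selected region:

        #include <linux/mm.h>

        static int my_mmap_physical(struct vm_area_struct *vma,
                                    phys_addr_t mem_addr, resource_size_t mem_size)
        {
                if (vma->vm_end - vma->vm_start > mem_size)
                        return -EINVAL;

                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

                /*
                 * Like uio, map from the region start: vm_pgoff was already
                 * consumed as a region index, not an offset into the region.
                 */
                return remap_pfn_range(vma, vma->vm_start,
                                       mem_addr >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot);
        }
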
vma               128 drivers/uio/uio_hv_generic.c 			    struct vm_area_struct *vma)
vma               137 drivers/uio/uio_hv_generic.c 	return vm_iomap_memory(vma, virt_to_phys(ring_buffer),
vma               194 drivers/usb/core/devio.c static void usbdev_vm_open(struct vm_area_struct *vma)
vma               196 drivers/usb/core/devio.c 	struct usb_memory *usbm = vma->vm_private_data;
vma               204 drivers/usb/core/devio.c static void usbdev_vm_close(struct vm_area_struct *vma)
vma               206 drivers/usb/core/devio.c 	struct usb_memory *usbm = vma->vm_private_data;
vma               216 drivers/usb/core/devio.c static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
vma               221 drivers/usb/core/devio.c 	size_t size = vma->vm_end - vma->vm_start;
vma               250 drivers/usb/core/devio.c 	usbm->vm_start = vma->vm_start;
vma               255 drivers/usb/core/devio.c 		if (remap_pfn_range(vma, vma->vm_start,
vma               257 drivers/usb/core/devio.c 				    size, vma->vm_page_prot) < 0) {
vma               262 drivers/usb/core/devio.c 		if (dma_mmap_coherent(hcd->self.sysdev, vma, mem, dma_handle,
vma               269 drivers/usb/core/devio.c 	vma->vm_flags |= VM_IO;
vma               270 drivers/usb/core/devio.c 	vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);
vma               271 drivers/usb/core/devio.c 	vma->vm_ops = &usbdev_vm_ops;
vma               272 drivers/usb/core/devio.c 	vma->vm_private_data = usbm;
vma               202 drivers/usb/gadget/function/uvc_queue.c int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
vma               204 drivers/usb/gadget/function/uvc_queue.c 	return vb2_mmap(&queue->queue, vma);
vma                80 drivers/usb/gadget/function/uvc_queue.h int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma);
vma               321 drivers/usb/gadget/function/uvc_v4l2.c uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
vma               326 drivers/usb/gadget/function/uvc_v4l2.c 	return uvcg_queue_mmap(&uvc->video.queue, vma);
vma              1222 drivers/usb/mon/mon_bin.c static void mon_bin_vma_open(struct vm_area_struct *vma)
vma              1224 drivers/usb/mon/mon_bin.c 	struct mon_reader_bin *rp = vma->vm_private_data;
vma              1232 drivers/usb/mon/mon_bin.c static void mon_bin_vma_close(struct vm_area_struct *vma)
vma              1236 drivers/usb/mon/mon_bin.c 	struct mon_reader_bin *rp = vma->vm_private_data;
vma              1247 drivers/usb/mon/mon_bin.c 	struct mon_reader_bin *rp = vmf->vma->vm_private_data;
vma              1267 drivers/usb/mon/mon_bin.c static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
vma              1270 drivers/usb/mon/mon_bin.c 	vma->vm_ops = &mon_bin_vm_ops;
vma              1271 drivers/usb/mon/mon_bin.c 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma              1272 drivers/usb/mon/mon_bin.c 	vma->vm_private_data = filp->private_data;
vma              1273 drivers/usb/mon/mon_bin.c 	mon_bin_vma_open(vma);
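
mon_bin (like uio's logical path and fb_defio earlier in this listing) maps nothing at mmap() time: the VMA only gets vm_ops plus VM_DONTEXPAND | VM_DONTDUMP, and pages are resolved on demand in .fault. A sketch, with my_reader as a hypothetical stand-in holding a page array:

        #include <linux/fs.h>
        #include <linux/mm.h>

        struct my_reader {
                struct page     **pages;
                unsigned long   n_pages;
        };

        static vm_fault_t my_vm_fault(struct vm_fault *vmf)
        {
                struct my_reader *rp = vmf->vma->vm_private_data;
                struct page *page;

                if (vmf->pgoff >= rp->n_pages)
                        return VM_FAULT_SIGBUS;

                page = rp->pages[vmf->pgoff];
                get_page(page);         /* the fault core consumes one reference */
                vmf->page = page;
                return 0;
        }

        static const struct vm_operations_struct my_vm_ops = {
                .fault = my_vm_fault,
        };

        static int my_mmap(struct file *filp, struct vm_area_struct *vma)
        {
                vma->vm_ops = &my_vm_ops;
                vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
                vma->vm_private_data = filp->private_data;
                return 0;
        }
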
vma                90 drivers/vfio/mdev/vfio_mdev.c static int vfio_mdev_mmap(void *device_data, struct vm_area_struct *vma)
vma                98 drivers/vfio/mdev/vfio_mdev.c 	return parent->ops->mmap(mdev, vma);
vma              1195 drivers/vfio/pci/vfio_pci.c static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
vma              1203 drivers/vfio/pci/vfio_pci.c 	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
vma              1205 drivers/vfio/pci/vfio_pci.c 	if (vma->vm_end < vma->vm_start)
vma              1207 drivers/vfio/pci/vfio_pci.c 	if ((vma->vm_flags & VM_SHARED) == 0)
vma              1215 drivers/vfio/pci/vfio_pci.c 			return region->ops->mmap(vdev, region, vma);
vma              1224 drivers/vfio/pci/vfio_pci.c 	req_len = vma->vm_end - vma->vm_start;
vma              1225 drivers/vfio/pci/vfio_pci.c 	pgoff = vma->vm_pgoff &
vma              1249 drivers/vfio/pci/vfio_pci.c 	vma->vm_private_data = vdev;
vma              1250 drivers/vfio/pci/vfio_pci.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma              1251 drivers/vfio/pci/vfio_pci.c 	vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
vma              1253 drivers/vfio/pci/vfio_pci.c 	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma              1254 drivers/vfio/pci/vfio_pci.c 			       req_len, vma->vm_page_prot);
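
vfio_pci_mmap() above recovers a region index from the upper bits of the mmap offset: userspace encodes "which BAR" in vm_pgoff, and the low bits remain a page offset inside that BAR. A sketch of the decode, restating the shift width as an assumption (VFIO_PCI_OFFSET_SHIFT is 40 in the real header):

        #include <linux/mm.h>

        #define MY_OFFSET_SHIFT 40

        static void decode_region_offset(struct vm_area_struct *vma,
                                         unsigned int *index, unsigned long *pgoff)
        {
                *index = vma->vm_pgoff >> (MY_OFFSET_SHIFT - PAGE_SHIFT);
                *pgoff = vma->vm_pgoff &
                         ((1U << (MY_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
        }

As the lines above show, the handler then rewrites vm_pgoff to (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff, so a plain remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, ...) maps the right physical window.
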
vma               119 drivers/vfio/pci/vfio_pci_nvlink2.c 	struct vm_area_struct *vma = vmf->vma;
vma               120 drivers/vfio/pci/vfio_pci_nvlink2.c 	struct vfio_pci_region *region = vma->vm_private_data;
vma               122 drivers/vfio/pci/vfio_pci_nvlink2.c 	unsigned long vmf_off = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
vma               124 drivers/vfio/pci/vfio_pci_nvlink2.c 	unsigned long vm_pgoff = vma->vm_pgoff &
vma               128 drivers/vfio/pci/vfio_pci_nvlink2.c 	ret = vmf_insert_pfn(vma, vmf->address, pfn);
vma               140 drivers/vfio/pci/vfio_pci_nvlink2.c 		struct vfio_pci_region *region, struct vm_area_struct *vma)
vma               148 drivers/vfio/pci/vfio_pci_nvlink2.c 	if (vma->vm_end - vma->vm_start > data->size)
vma               151 drivers/vfio/pci/vfio_pci_nvlink2.c 	vma->vm_private_data = region;
vma               152 drivers/vfio/pci/vfio_pci_nvlink2.c 	vma->vm_flags |= VM_PFNMAP;
vma               153 drivers/vfio/pci/vfio_pci_nvlink2.c 	vma->vm_ops = &vfio_pci_nvgpu_mmap_vmops;
vma               161 drivers/vfio/pci/vfio_pci_nvlink2.c 	data->useraddr = vma->vm_start;
vma               166 drivers/vfio/pci/vfio_pci_nvlink2.c 			vma_pages(vma), data->gpu_hpa, &data->mem);
vma               169 drivers/vfio/pci/vfio_pci_nvlink2.c 			vma->vm_end - vma->vm_start, ret);
vma               332 drivers/vfio/pci/vfio_pci_nvlink2.c 		struct vfio_pci_region *region, struct vm_area_struct *vma)
vma               336 drivers/vfio/pci/vfio_pci_nvlink2.c 	unsigned long req_len = vma->vm_end - vma->vm_start;
vma               341 drivers/vfio/pci/vfio_pci_nvlink2.c 	vma->vm_flags |= VM_PFNMAP;
vma               342 drivers/vfio/pci/vfio_pci_nvlink2.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               344 drivers/vfio/pci/vfio_pci_nvlink2.c 	ret = remap_pfn_range(vma, vma->vm_start, data->mmio_atsd >> PAGE_SHIFT,
vma               345 drivers/vfio/pci/vfio_pci_nvlink2.c 			req_len, vma->vm_page_prot);
vma               346 drivers/vfio/pci/vfio_pci_nvlink2.c 	trace_vfio_pci_npu2_mmap(vdev->pdev, data->mmio_atsd, vma->vm_start,
vma               347 drivers/vfio/pci/vfio_pci_nvlink2.c 			vma->vm_end - vma->vm_start, ret);
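
The nvlink2 hits show the VM_PFNMAP fault pattern: mmap() only marks the VMA (VM_PFNMAP plus vm_ops) and records state, and each fault computes a PFN and inserts it with vmf_insert_pfn(). A sketch, where my_region and its base_pfn are assumed per-region state, not the driver's actual structure:

        #include <linux/mm.h>

        struct my_region {
                unsigned long base_pfn; /* assumed: physical base >> PAGE_SHIFT */
        };

        static vm_fault_t my_pfnmap_fault(struct vm_fault *vmf)
        {
                struct vm_area_struct *vma = vmf->vma;
                struct my_region *r = vma->vm_private_data;
                unsigned long off = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

                /* vm_pgoff may carry a sub-region offset, as in the code above */
                return vmf_insert_pfn(vma, vmf->address,
                                      r->base_pfn + vma->vm_pgoff + off);
        }
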
vma                61 drivers/vfio/pci/vfio_pci_private.h 			struct vm_area_struct *vma);
vma               558 drivers/vfio/platform/vfio_platform_common.c 				   struct vm_area_struct *vma)
vma               562 drivers/vfio/platform/vfio_platform_common.c 	req_len = vma->vm_end - vma->vm_start;
vma               563 drivers/vfio/platform/vfio_platform_common.c 	pgoff = vma->vm_pgoff &
vma               570 drivers/vfio/platform/vfio_platform_common.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               571 drivers/vfio/platform/vfio_platform_common.c 	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
vma               573 drivers/vfio/platform/vfio_platform_common.c 	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma               574 drivers/vfio/platform/vfio_platform_common.c 			       req_len, vma->vm_page_prot);
vma               577 drivers/vfio/platform/vfio_platform_common.c static int vfio_platform_mmap(void *device_data, struct vm_area_struct *vma)
vma               582 drivers/vfio/platform/vfio_platform_common.c 	index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT);
vma               584 drivers/vfio/platform/vfio_platform_common.c 	if (vma->vm_end < vma->vm_start)
vma               586 drivers/vfio/platform/vfio_platform_common.c 	if (!(vma->vm_flags & VM_SHARED))
vma               590 drivers/vfio/platform/vfio_platform_common.c 	if (vma->vm_start & ~PAGE_MASK)
vma               592 drivers/vfio/platform/vfio_platform_common.c 	if (vma->vm_end & ~PAGE_MASK)
vma               599 drivers/vfio/platform/vfio_platform_common.c 			&& (vma->vm_flags & VM_READ))
vma               603 drivers/vfio/platform/vfio_platform_common.c 			&& (vma->vm_flags & VM_WRITE))
vma               606 drivers/vfio/platform/vfio_platform_common.c 	vma->vm_private_data = vdev;
vma               609 drivers/vfio/platform/vfio_platform_common.c 		return vfio_platform_mmap_mmio(vdev->regions[index], vma);
vma              1258 drivers/vfio/vfio.c static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
vma              1266 drivers/vfio/vfio.c 		ret = driver->ops->mmap(container->iommu_data, vma);
vma              1680 drivers/vfio/vfio.c static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
vma              1687 drivers/vfio/vfio.c 	return device->ops->mmap(device->device_data, vma);
vma               342 drivers/vfio/vfio_iommu_type1.c 	struct vm_area_struct *vma;
vma               380 drivers/vfio/vfio_iommu_type1.c 	vma = find_vma_intersection(mm, vaddr, vaddr + 1);
vma               382 drivers/vfio/vfio_iommu_type1.c 	if (vma && vma->vm_flags & VM_PFNMAP) {
vma               383 drivers/vfio/vfio_iommu_type1.c 		if (!follow_pfn(vma, vaddr, pfn) &&
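
vfio_iommu_type1 above handles user addresses that get_user_pages() cannot pin: if the intersecting VMA is VM_PFNMAP, the PFN is read straight out of the page tables with follow_pfn(), which was still the accepted interface in this era. A sketch of that fallback, with mmap_sem assumed held by the caller:

        #include <linux/mm.h>

        static int uva_to_pfn(struct mm_struct *mm, unsigned long vaddr,
                              unsigned long *pfn)
        {
                struct vm_area_struct *vma;

                vma = find_vma_intersection(mm, vaddr, vaddr + 1);
                if (!vma || !(vma->vm_flags & VM_PFNMAP))
                        return -EFAULT;

                return follow_pfn(vma, vaddr, pfn);
        }
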
vma                97 drivers/video/fbdev/68328fb.c static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma);
vma               392 drivers/video/fbdev/68328fb.c static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma               397 drivers/video/fbdev/68328fb.c 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma               398 drivers/video/fbdev/68328fb.c 	vma->vm_start = videomemory;
vma               411 drivers/video/fbdev/amba-clcd.c 		       struct vm_area_struct *vma)
vma               414 drivers/video/fbdev/amba-clcd.c 	unsigned long len, off = vma->vm_pgoff << PAGE_SHIFT;
vma               419 drivers/video/fbdev/amba-clcd.c 	if (off <= len && vma->vm_end - vma->vm_start <= len - off &&
vma               421 drivers/video/fbdev/amba-clcd.c 		ret = fb->board->mmap(fb, vma);
vma               787 drivers/video/fbdev/amba-clcd.c static int clcdfb_of_vram_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
vma               792 drivers/video/fbdev/amba-clcd.c 	off = vma->vm_pgoff << PAGE_SHIFT;
vma               793 drivers/video/fbdev/amba-clcd.c 	user_size = vma->vm_end - vma->vm_start;
vma               799 drivers/video/fbdev/amba-clcd.c 	return remap_pfn_range(vma, vma->vm_start,
vma               800 drivers/video/fbdev/amba-clcd.c 			__phys_to_pfn(fb->fb.fix.smem_start) + vma->vm_pgoff,
vma               802 drivers/video/fbdev/amba-clcd.c 			pgprot_writecombine(vma->vm_page_prot));
vma               833 drivers/video/fbdev/amba-clcd.c static int clcdfb_of_dma_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
vma               835 drivers/video/fbdev/amba-clcd.c 	return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base,
vma               239 drivers/video/fbdev/aty/atyfb_base.c static int atyfb_mmap(struct fb_info *info, struct vm_area_struct *vma);
vma              1917 drivers/video/fbdev/aty/atyfb_base.c static int atyfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma              1928 drivers/video/fbdev/aty/atyfb_base.c 	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
vma              1931 drivers/video/fbdev/aty/atyfb_base.c 	off = vma->vm_pgoff << PAGE_SHIFT;
vma              1932 drivers/video/fbdev/aty/atyfb_base.c 	size = vma->vm_end - vma->vm_start;
vma              1936 drivers/video/fbdev/aty/atyfb_base.c 	if (((vma->vm_pgoff == 0) && (size == info->fix.smem_len)) ||
vma              1940 drivers/video/fbdev/aty/atyfb_base.c 	vma->vm_pgoff = off >> PAGE_SHIFT;	/* propagate off changes */
vma              1966 drivers/video/fbdev/aty/atyfb_base.c 		pgprot_val(vma->vm_page_prot) &= ~(par->mmap_map[i].prot_mask);
vma              1967 drivers/video/fbdev/aty/atyfb_base.c 		pgprot_val(vma->vm_page_prot) |= par->mmap_map[i].prot_flag;
vma              1969 drivers/video/fbdev/aty/atyfb_base.c 		if (remap_pfn_range(vma, vma->vm_start + page,
vma              1970 drivers/video/fbdev/aty/atyfb_base.c 			map_offset >> PAGE_SHIFT, map_size, vma->vm_page_prot))
vma               341 drivers/video/fbdev/au1100fb.c int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
vma               345 drivers/video/fbdev/au1100fb.c 	pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
vma               347 drivers/video/fbdev/au1100fb.c 	return dma_mmap_coherent(fbdev->dev, vma, fbdev->fb_mem, fbdev->fb_phys,
vma              1232 drivers/video/fbdev/au1200fb.c static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma              1236 drivers/video/fbdev/au1200fb.c 	return dma_mmap_attrs(fbdev->dev, vma, fbdev->fb_mem, fbdev->fb_phys,
vma               163 drivers/video/fbdev/bw2.c static int bw2_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma               170 drivers/video/fbdev/bw2.c 				  vma);
vma               267 drivers/video/fbdev/cg14.c static int cg14_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma               273 drivers/video/fbdev/cg14.c 				  par->iospace, vma);
vma               227 drivers/video/fbdev/cg3.c static int cg3_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma               234 drivers/video/fbdev/cg3.c 				  vma);
vma               592 drivers/video/fbdev/cg6.c static int cg6_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma               598 drivers/video/fbdev/cg6.c 				  par->which_io, vma);
vma               130 drivers/video/fbdev/controlfb.c 	struct vm_area_struct *vma);
vma               286 drivers/video/fbdev/controlfb.c                        struct vm_area_struct *vma)
vma               295 drivers/video/fbdev/controlfb.c 	if (vma->vm_pgoff >= mmio_pgoff) {
vma               298 drivers/video/fbdev/controlfb.c 		vma->vm_pgoff -= mmio_pgoff;
vma               301 drivers/video/fbdev/controlfb.c 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               304 drivers/video/fbdev/controlfb.c 		vma->vm_page_prot = pgprot_cached_wthru(vma->vm_page_prot);
vma               307 drivers/video/fbdev/controlfb.c 	return vm_iomap_memory(vma, start, len);
vma                44 drivers/video/fbdev/core/fb_defio.c 	struct fb_info *info = vmf->vma->vm_private_data;
vma                56 drivers/video/fbdev/core/fb_defio.c 	if (vmf->vma->vm_file)
vma                57 drivers/video/fbdev/core/fb_defio.c 		page->mapping = vmf->vma->vm_file->f_mapping;
vma                96 drivers/video/fbdev/core/fb_defio.c 	struct fb_info *info = vmf->vma->vm_private_data;
vma               106 drivers/video/fbdev/core/fb_defio.c 	file_update_time(vmf->vma->vm_file);
vma               165 drivers/video/fbdev/core/fb_defio.c int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma               167 drivers/video/fbdev/core/fb_defio.c 	vma->vm_ops = &fb_deferred_io_vm_ops;
vma               168 drivers/video/fbdev/core/fb_defio.c 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma               170 drivers/video/fbdev/core/fb_defio.c 		vma->vm_flags |= VM_IO;
vma               171 drivers/video/fbdev/core/fb_defio.c 	vma->vm_private_data = info;
vma              1332 drivers/video/fbdev/core/fbmem.c fb_mmap(struct file *file, struct vm_area_struct * vma)
vma              1351 drivers/video/fbdev/core/fbmem.c 		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
vma              1352 drivers/video/fbdev/core/fbmem.c 		res = fb->fb_mmap(info, vma);
vma              1364 drivers/video/fbdev/core/fbmem.c 	if (vma->vm_pgoff >= mmio_pgoff) {
vma              1370 drivers/video/fbdev/core/fbmem.c 		vma->vm_pgoff -= mmio_pgoff;
vma              1376 drivers/video/fbdev/core/fbmem.c 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma              1381 drivers/video/fbdev/core/fbmem.c 	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
vma              1382 drivers/video/fbdev/core/fbmem.c 	fb_pgprotect(file, vma, start);
vma              1384 drivers/video/fbdev/core/fbmem.c 	return vm_iomap_memory(vma, start, len);
vma               310 drivers/video/fbdev/ep93xx-fb.c static int ep93xxfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma               312 drivers/video/fbdev/ep93xx-fb.c 	unsigned int offset = vma->vm_pgoff << PAGE_SHIFT;
vma               315 drivers/video/fbdev/ep93xx-fb.c 		return dma_mmap_wc(info->dev, vma, info->screen_base,
vma               640 drivers/video/fbdev/fb-puv3.c 		    struct vm_area_struct *vma)
vma               642 drivers/video/fbdev/fb-puv3.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               644 drivers/video/fbdev/fb-puv3.c 	return vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len);
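
fb-puv3's two-liner above is the cleanest instance of the vm_iomap_memory() idiom used throughout these fbdev drivers: the helper checks vm_pgoff and the requested size against the physical window and performs the remap itself. A sketch of the same shape:

        #include <linux/fb.h>
        #include <linux/mm.h>

        static int my_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
        {
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

                return vm_iomap_memory(vma, info->fix.smem_start,
                                       info->fix.smem_len);
        }
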
vma               852 drivers/video/fbdev/ffb.c static int ffb_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma               858 drivers/video/fbdev/ffb.c 				  0, vma);
vma               997 drivers/video/fbdev/gbefb.c 			struct vm_area_struct *vma)
vma               999 drivers/video/fbdev/gbefb.c 	unsigned long size = vma->vm_end - vma->vm_start;
vma              1000 drivers/video/fbdev/gbefb.c 	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
vma              1006 drivers/video/fbdev/gbefb.c 	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
vma              1016 drivers/video/fbdev/gbefb.c 	pgprot_val(vma->vm_page_prot) =
vma              1017 drivers/video/fbdev/gbefb.c 		pgprot_fb(pgprot_val(vma->vm_page_prot));
vma              1023 drivers/video/fbdev/gbefb.c 	addr = vma->vm_start;
vma              1034 drivers/video/fbdev/gbefb.c 		if (remap_pfn_range(vma, addr, phys_addr >> PAGE_SHIFT,
vma              1035 drivers/video/fbdev/gbefb.c 						phys_size, vma->vm_page_prot))
vma               416 drivers/video/fbdev/leo.c static int leo_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma               422 drivers/video/fbdev/leo.c 				  par->which_io, vma);
vma               162 drivers/video/fbdev/omap/omapfb.h 					   struct vm_area_struct *vma);
vma              1199 drivers/video/fbdev/omap/omapfb_main.c static int omapfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma              1206 drivers/video/fbdev/omap/omapfb_main.c 	r = fbdev->ctrl->mmap(info, vma);
vma              1066 drivers/video/fbdev/omap2/omapfb/omapfb-main.c static void mmap_user_open(struct vm_area_struct *vma)
vma              1068 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 	struct omapfb2_mem_region *rg = vma->vm_private_data;
vma              1075 drivers/video/fbdev/omap2/omapfb/omapfb-main.c static void mmap_user_close(struct vm_area_struct *vma)
vma              1077 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 	struct omapfb2_mem_region *rg = vma->vm_private_data;
vma              1089 drivers/video/fbdev/omap2/omapfb/omapfb-main.c static int omapfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
vma              1104 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 			vma->vm_pgoff << PAGE_SHIFT);
vma              1106 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma              1107 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 	vma->vm_ops = &mmap_user_ops;
vma              1108 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 	vma->vm_private_data = rg;
vma              1110 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 	r = vm_iomap_memory(vma, start, len);
vma               220 drivers/video/fbdev/p9100.c static int p9100_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma               226 drivers/video/fbdev/p9100.c 				  par->which_io, vma);
vma               706 drivers/video/fbdev/ps3fb.c static int ps3fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma               710 drivers/video/fbdev/ps3fb.c 	r = vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len);
vma               713 drivers/video/fbdev/ps3fb.c 		info->fix.smem_start + (vma->vm_pgoff << PAGE_SHIFT),
vma               714 drivers/video/fbdev/ps3fb.c 		vma->vm_start);
vma               473 drivers/video/fbdev/pxa3xx-gcu.c pxa3xx_gcu_mmap(struct file *file, struct vm_area_struct *vma)
vma               475 drivers/video/fbdev/pxa3xx-gcu.c 	unsigned int size = vma->vm_end - vma->vm_start;
vma               478 drivers/video/fbdev/pxa3xx-gcu.c 	switch (vma->vm_pgoff) {
vma               484 drivers/video/fbdev/pxa3xx-gcu.c 		return dma_mmap_coherent(priv->dev, vma,
vma               493 drivers/video/fbdev/pxa3xx-gcu.c 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               495 drivers/video/fbdev/pxa3xx-gcu.c 		return io_remap_pfn_range(vma, vma->vm_start,
vma               497 drivers/video/fbdev/pxa3xx-gcu.c 				size, vma->vm_page_prot);
vma               560 drivers/video/fbdev/sa1100fb.c 			 struct vm_area_struct *vma)
vma               564 drivers/video/fbdev/sa1100fb.c 	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
vma               567 drivers/video/fbdev/sa1100fb.c 		vma->vm_pgoff += 1; /* skip over the palette */
vma               568 drivers/video/fbdev/sa1100fb.c 		return dma_mmap_wc(fbi->dev, vma, fbi->map_cpu, fbi->map_dma,
vma               572 drivers/video/fbdev/sa1100fb.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               574 drivers/video/fbdev/sa1100fb.c 	return vm_iomap_memory(vma, info->fix.mmio_start, info->fix.mmio_len);
vma                45 drivers/video/fbdev/sbuslib.c 		       struct vm_area_struct *vma)
vma                52 drivers/video/fbdev/sbuslib.c 	if (!(vma->vm_flags & (VM_SHARED | VM_MAYSHARE)))
vma                55 drivers/video/fbdev/sbuslib.c 	size = vma->vm_end - vma->vm_start;
vma                56 drivers/video/fbdev/sbuslib.c 	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
vma                59 drivers/video/fbdev/sbuslib.c 	off = vma->vm_pgoff << PAGE_SHIFT;
vma                63 drivers/video/fbdev/sbuslib.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma                85 drivers/video/fbdev/sbuslib.c 		r = io_remap_pfn_range(vma,
vma                86 drivers/video/fbdev/sbuslib.c 					vma->vm_start + page,
vma                90 drivers/video/fbdev/sbuslib.c 					vma->vm_page_prot);
vma                21 drivers/video/fbdev/sbuslib.h 			      struct vm_area_struct *vma);
vma              1485 drivers/video/fbdev/sh_mobile_lcdcfb.c sh_mobile_lcdc_overlay_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma              1489 drivers/video/fbdev/sh_mobile_lcdcfb.c 	return dma_mmap_coherent(ovl->channel->lcdc->dev, vma, ovl->fb_mem,
vma              1959 drivers/video/fbdev/sh_mobile_lcdcfb.c sh_mobile_lcdc_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma              1963 drivers/video/fbdev/sh_mobile_lcdcfb.c 	return dma_mmap_coherent(ch->lcdc->dev, vma, ch->fb_mem,
vma               775 drivers/video/fbdev/smscufx.c static int ufx_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma               777 drivers/video/fbdev/smscufx.c 	unsigned long start = vma->vm_start;
vma               778 drivers/video/fbdev/smscufx.c 	unsigned long size = vma->vm_end - vma->vm_start;
vma               779 drivers/video/fbdev/smscufx.c 	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
vma               782 drivers/video/fbdev/smscufx.c 	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
vma               796 drivers/video/fbdev/smscufx.c 		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
vma               301 drivers/video/fbdev/tcx.c static int tcx_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma               307 drivers/video/fbdev/tcx.c 				  par->which_io, vma);
vma               322 drivers/video/fbdev/udlfb.c static int dlfb_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma               324 drivers/video/fbdev/udlfb.c 	unsigned long start = vma->vm_start;
vma               325 drivers/video/fbdev/udlfb.c 	unsigned long size = vma->vm_end - vma->vm_start;
vma               326 drivers/video/fbdev/udlfb.c 	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
vma               329 drivers/video/fbdev/udlfb.c 	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
vma               343 drivers/video/fbdev/udlfb.c 		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
vma               987 drivers/video/fbdev/vermilion/vermilion.c static int vmlfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma               990 drivers/video/fbdev/vermilion/vermilion.c 	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
vma               998 drivers/video/fbdev/vermilion/vermilion.c 	prot = pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK;
vma               999 drivers/video/fbdev/vermilion/vermilion.c 	pgprot_val(vma->vm_page_prot) =
vma              1002 drivers/video/fbdev/vermilion/vermilion.c 	return vm_iomap_memory(vma, vinfo->vram_start,
vma                79 drivers/video/fbdev/vfb.c 		    struct vm_area_struct *vma);
vma               385 drivers/video/fbdev/vfb.c 		    struct vm_area_struct *vma)
vma               387 drivers/video/fbdev/vfb.c 	return remap_vmalloc_range(vma, (void *)info->fix.smem_start, vma->vm_pgoff);
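
vfb above backs its framebuffer with vmalloc memory and maps it via remap_vmalloc_range(). For that call to succeed the buffer must have been allocated with vmalloc_user() (or the area flagged VM_USERMAP), which is also why ipu3 earlier passed VM_USERMAP to __get_vm_area(). A sketch, with my_buf assumed to come from vmalloc_user():

        #include <linux/fs.h>
        #include <linux/mm.h>
        #include <linux/vmalloc.h>

        static void *my_buf;    /* assumed: my_buf = vmalloc_user(MY_SIZE); */

        static int my_vmalloc_mmap(struct file *file, struct vm_area_struct *vma)
        {
                /* inserts every page of the area, starting vm_pgoff pages in */
                return remap_vmalloc_range(vma, my_buf, vma->vm_pgoff);
        }
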
vma               798 drivers/vme/vme.c int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
vma               810 drivers/vme/vme.c 	phys_addr = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT);
vma               811 drivers/vme/vme.c 	vma_size = vma->vm_end - vma->vm_start;
vma               818 drivers/vme/vme.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               820 drivers/vme/vme.c 	return vm_iomap_memory(vma, phys_addr, vma->vm_end - vma->vm_start);
vma               462 drivers/xen/gntalloc.c static void gntalloc_vma_open(struct vm_area_struct *vma)
vma               464 drivers/xen/gntalloc.c 	struct gntalloc_vma_private_data *priv = vma->vm_private_data;
vma               474 drivers/xen/gntalloc.c static void gntalloc_vma_close(struct vm_area_struct *vma)
vma               476 drivers/xen/gntalloc.c 	struct gntalloc_vma_private_data *priv = vma->vm_private_data;
vma               505 drivers/xen/gntalloc.c static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
vma               510 drivers/xen/gntalloc.c 	int count = vma_pages(vma);
vma               513 drivers/xen/gntalloc.c 	if (!(vma->vm_flags & VM_SHARED)) {
vma               525 drivers/xen/gntalloc.c 		       priv, vm_priv, vma->vm_pgoff, count);
vma               527 drivers/xen/gntalloc.c 	gref = find_grefs(priv, vma->vm_pgoff << PAGE_SHIFT, count);
vma               540 drivers/xen/gntalloc.c 	vma->vm_private_data = vm_priv;
vma               542 drivers/xen/gntalloc.c 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma               544 drivers/xen/gntalloc.c 	vma->vm_ops = &gntalloc_vmops;
vma               548 drivers/xen/gntalloc.c 		rv = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
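
gntalloc's tail above inserts its granted pages one at a time with vm_insert_page(); the privcmd-buf hits further down use vm_map_pages_zero(), the batched helper added for exactly this loop. A sketch of the manual form, assuming order-0 pages and a VMA that is not VM_PFNMAP:

        #include <linux/mm.h>

        static int map_page_array(struct vm_area_struct *vma,
                                  struct page **pages, unsigned long count)
        {
                unsigned long i;
                int rv;

                for (i = 0; i < count; i++) {
                        rv = vm_insert_page(vma,
                                            vma->vm_start + i * PAGE_SIZE,
                                            pages[i]);
                        if (rv)
                                return rv;
                }
                return 0;
        }
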
vma                53 drivers/xen/gntdev-common.h 	struct vm_area_struct *vma;
vma               359 drivers/xen/gntdev-dmabuf.c 			       struct vm_area_struct *vma)
vma               268 drivers/xen/gntdev.c 	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
vma               431 drivers/xen/gntdev.c static void gntdev_vma_open(struct vm_area_struct *vma)
vma               433 drivers/xen/gntdev.c 	struct gntdev_grant_map *map = vma->vm_private_data;
vma               435 drivers/xen/gntdev.c 	pr_debug("gntdev_vma_open %p\n", vma);
vma               439 drivers/xen/gntdev.c static void gntdev_vma_close(struct vm_area_struct *vma)
vma               441 drivers/xen/gntdev.c 	struct gntdev_grant_map *map = vma->vm_private_data;
vma               442 drivers/xen/gntdev.c 	struct file *file = vma->vm_file;
vma               445 drivers/xen/gntdev.c 	pr_debug("gntdev_vma_close %p\n", vma);
vma               455 drivers/xen/gntdev.c 		map->vma = NULL;
vma               458 drivers/xen/gntdev.c 	vma->vm_private_data = NULL;
vma               462 drivers/xen/gntdev.c static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
vma               465 drivers/xen/gntdev.c 	struct gntdev_grant_map *map = vma->vm_private_data;
vma               481 drivers/xen/gntdev.c 	if (!map->vma)
vma               483 drivers/xen/gntdev.c 	if (map->vma->vm_start >= end)
vma               485 drivers/xen/gntdev.c 	if (map->vma->vm_end <= start)
vma               504 drivers/xen/gntdev.c 	mstart = max(start, map->vma->vm_start);
vma               505 drivers/xen/gntdev.c 	mend   = min(end,   map->vma->vm_end);
vma               508 drivers/xen/gntdev.c 			map->vma->vm_start, map->vma->vm_end,
vma               511 drivers/xen/gntdev.c 				(mstart - map->vma->vm_start) >> PAGE_SHIFT,
vma               558 drivers/xen/gntdev.c 		if (!map->vma)
vma               562 drivers/xen/gntdev.c 				map->vma->vm_start, map->vma->vm_end);
vma               567 drivers/xen/gntdev.c 		if (!map->vma)
vma               571 drivers/xen/gntdev.c 				map->vma->vm_start, map->vma->vm_end);
vma               731 drivers/xen/gntdev.c 	struct vm_area_struct *vma;
vma               740 drivers/xen/gntdev.c 	vma = find_vma(current->mm, op.vaddr);
vma               741 drivers/xen/gntdev.c 	if (!vma || vma->vm_ops != &gntdev_vmops)
vma               744 drivers/xen/gntdev.c 	map = vma->vm_private_data;
vma              1070 drivers/xen/gntdev.c static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
vma              1073 drivers/xen/gntdev.c 	int index = vma->vm_pgoff;
vma              1074 drivers/xen/gntdev.c 	int count = vma_pages(vma);
vma              1078 drivers/xen/gntdev.c 	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
vma              1082 drivers/xen/gntdev.c 			index, count, vma->vm_start, vma->vm_pgoff);
vma              1088 drivers/xen/gntdev.c 	if (use_ptemod && map->vma)
vma              1090 drivers/xen/gntdev.c 	if (use_ptemod && priv->mm != vma->vm_mm) {
vma              1097 drivers/xen/gntdev.c 	vma->vm_ops = &gntdev_vmops;
vma              1099 drivers/xen/gntdev.c 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
vma              1102 drivers/xen/gntdev.c 		vma->vm_flags |= VM_DONTCOPY;
vma              1104 drivers/xen/gntdev.c 	vma->vm_private_data = map;
vma              1107 drivers/xen/gntdev.c 		map->vma = vma;
vma              1110 drivers/xen/gntdev.c 		if ((vma->vm_flags & VM_WRITE) &&
vma              1115 drivers/xen/gntdev.c 		if (!(vma->vm_flags & VM_WRITE))
vma              1122 drivers/xen/gntdev.c 		map->pages_vm_start = vma->vm_start;
vma              1123 drivers/xen/gntdev.c 		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
vma              1124 drivers/xen/gntdev.c 					  vma->vm_end - vma->vm_start,
vma              1137 drivers/xen/gntdev.c 		err = vm_map_pages_zero(vma, map->pages, map->count);
vma              1152 drivers/xen/gntdev.c 			apply_to_page_range(vma->vm_mm, vma->vm_start,
vma              1153 drivers/xen/gntdev.c 					    vma->vm_end - vma->vm_start,
vma              1169 drivers/xen/gntdev.c 		map->vma = NULL;
vma                86 drivers/xen/privcmd-buf.c static void privcmd_buf_vma_open(struct vm_area_struct *vma)
vma                88 drivers/xen/privcmd-buf.c 	struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;
vma                98 drivers/xen/privcmd-buf.c static void privcmd_buf_vma_close(struct vm_area_struct *vma)
vma               100 drivers/xen/privcmd-buf.c 	struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;
vma               120 drivers/xen/privcmd-buf.c 		 vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
vma               132 drivers/xen/privcmd-buf.c static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
vma               136 drivers/xen/privcmd-buf.c 	unsigned long count = vma_pages(vma);
vma               140 drivers/xen/privcmd-buf.c 	if (!(vma->vm_flags & VM_SHARED))
vma               159 drivers/xen/privcmd-buf.c 	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
vma               160 drivers/xen/privcmd-buf.c 	vma->vm_ops = &privcmd_buf_vm_ops;
vma               161 drivers/xen/privcmd-buf.c 	vma->vm_private_data = vma_priv;
vma               168 drivers/xen/privcmd-buf.c 		ret = vm_map_pages_zero(vma, vma_priv->pages,
vma                66 drivers/xen/privcmd.c                struct vm_area_struct *vma,
vma               219 drivers/xen/privcmd.c 	struct vm_area_struct *vma;
vma               227 drivers/xen/privcmd.c 	struct vm_area_struct *vma = st->vma;
vma               237 drivers/xen/privcmd.c 	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
vma               240 drivers/xen/privcmd.c 	rc = xen_remap_domain_gfn_range(vma,
vma               243 drivers/xen/privcmd.c 					vma->vm_page_prot,
vma               258 drivers/xen/privcmd.c 	struct vm_area_struct *vma;
vma               288 drivers/xen/privcmd.c 		vma = find_vma(mm, msg->va);
vma               291 drivers/xen/privcmd.c 		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
vma               293 drivers/xen/privcmd.c 		vma->vm_private_data = PRIV_VMA_LOCKED;
vma               296 drivers/xen/privcmd.c 	state.va = vma->vm_start;
vma               297 drivers/xen/privcmd.c 	state.vma = vma;
vma               317 drivers/xen/privcmd.c 	struct vm_area_struct *vma;
vma               341 drivers/xen/privcmd.c 	struct vm_area_struct *vma = st->vma;
vma               342 drivers/xen/privcmd.c 	struct page **pages = vma->vm_private_data;
vma               350 drivers/xen/privcmd.c 	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
vma               351 drivers/xen/privcmd.c 					 (int *)gfnp, st->vma->vm_page_prot,
vma               421 drivers/xen/privcmd.c static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
vma               437 drivers/xen/privcmd.c 	BUG_ON(vma->vm_private_data != NULL);
vma               438 drivers/xen/privcmd.c 	vma->vm_private_data = pages;
vma               452 drivers/xen/privcmd.c 	struct vm_area_struct *vma;
vma               504 drivers/xen/privcmd.c 	vma = find_vma(mm, m.addr);
vma               505 drivers/xen/privcmd.c 	if (!vma ||
vma               506 drivers/xen/privcmd.c 	    vma->vm_ops != &privcmd_vm_ops) {
vma               522 drivers/xen/privcmd.c 	if (vma->vm_private_data == NULL) {
vma               523 drivers/xen/privcmd.c 		if (m.addr != vma->vm_start ||
vma               524 drivers/xen/privcmd.c 		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
vma               529 drivers/xen/privcmd.c 			ret = alloc_empty_pages(vma, nr_pages);
vma               533 drivers/xen/privcmd.c 			vma->vm_private_data = PRIV_VMA_LOCKED;
vma               535 drivers/xen/privcmd.c 		if (m.addr < vma->vm_start ||
vma               536 drivers/xen/privcmd.c 		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
vma               540 drivers/xen/privcmd.c 		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
vma               547 drivers/xen/privcmd.c 	state.vma           = vma;
vma               731 drivers/xen/privcmd.c 	struct vm_area_struct *vma;
vma               746 drivers/xen/privcmd.c 	vma = find_vma(mm, kdata.addr);
vma               747 drivers/xen/privcmd.c 	if (!vma || vma->vm_ops != &privcmd_vm_ops) {
vma               764 drivers/xen/privcmd.c 		rc = alloc_empty_pages(vma, nr);
vma               768 drivers/xen/privcmd.c 		pages = vma->vm_private_data;
vma               776 drivers/xen/privcmd.c 		vma->vm_private_data = PRIV_VMA_LOCKED;
vma               795 drivers/xen/privcmd.c 		rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
vma               802 drivers/xen/privcmd.c 		num = xen_remap_domain_mfn_array(vma,
vma               805 drivers/xen/privcmd.c 						 vma->vm_page_prot,
vma               807 drivers/xen/privcmd.c 						 vma->vm_private_data);
vma               893 drivers/xen/privcmd.c static void privcmd_close(struct vm_area_struct *vma)
vma               895 drivers/xen/privcmd.c 	struct page **pages = vma->vm_private_data;
vma               896 drivers/xen/privcmd.c 	int numpgs = vma_pages(vma);
vma               897 drivers/xen/privcmd.c 	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
vma               903 drivers/xen/privcmd.c 	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
vma               915 drivers/xen/privcmd.c 	       vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
vma               926 drivers/xen/privcmd.c static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
vma               930 drivers/xen/privcmd.c 	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
vma               932 drivers/xen/privcmd.c 	vma->vm_ops = &privcmd_vm_ops;
vma               933 drivers/xen/privcmd.c 	vma->vm_private_data = NULL;
vma               949 drivers/xen/privcmd.c 	           struct vm_area_struct *vma,
vma               953 drivers/xen/privcmd.c 	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
vma                92 drivers/xen/xenbus/xenbus_dev_backend.c static int xenbus_backend_mmap(struct file *file, struct vm_area_struct *vma)
vma                94 drivers/xen/xenbus/xenbus_dev_backend.c 	size_t size = vma->vm_end - vma->vm_start;
vma                99 drivers/xen/xenbus/xenbus_dev_backend.c 	if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
vma               102 drivers/xen/xenbus/xenbus_dev_backend.c 	if (remap_pfn_range(vma, vma->vm_start,
vma               104 drivers/xen/xenbus/xenbus_dev_backend.c 			    size, vma->vm_page_prot))
vma                34 drivers/xen/xenfs/xenstored.c static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma)
vma                36 drivers/xen/xenfs/xenstored.c 	size_t size = vma->vm_end - vma->vm_start;
vma                38 drivers/xen/xenfs/xenstored.c 	if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
vma                41 drivers/xen/xenfs/xenstored.c 	if (remap_pfn_range(vma, vma->vm_start,
vma                43 drivers/xen/xenfs/xenstored.c 			    size, vma->vm_page_prot))
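
xenbus_dev_backend and xenfs/xenstored share the most minimal mmap shape in this listing: accept exactly one page at offset zero and remap it. A sketch, with the pfn assumed to come from virt_to_pfn() on the shared page:

        #include <linux/mm.h>

        static int one_page_mmap(struct vm_area_struct *vma, unsigned long pfn)
        {
                size_t size = vma->vm_end - vma->vm_start;

                if (size > PAGE_SIZE || vma->vm_pgoff != 0)
                        return -EINVAL;

                if (remap_pfn_range(vma, vma->vm_start, pfn, size,
                                    vma->vm_page_prot))
                        return -EAGAIN;

                return 0;
        }
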
vma                69 drivers/xen/xlate_mmu.c 	struct vm_area_struct *vma;
vma               138 drivers/xen/xlate_mmu.c 		set_pte_at(info->vma->vm_mm, addr, ptep, pte);
vma               143 drivers/xen/xlate_mmu.c int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
vma               156 drivers/xen/xlate_mmu.c 	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
vma               162 drivers/xen/xlate_mmu.c 	data.vma   = vma;
vma               168 drivers/xen/xlate_mmu.c 	err = apply_to_page_range(vma->vm_mm, addr, range,
vma               183 drivers/xen/xlate_mmu.c int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
vma               286 drivers/xen/xlate_mmu.c int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr, unsigned long len)
vma               289 drivers/xen/xlate_mmu.c 		.mm = vma->vm_mm,
vma               290 drivers/xen/xlate_mmu.c 		.pages = vma->vm_private_data,
vma               291 drivers/xen/xlate_mmu.c 		.prot = vma->vm_page_prot,
vma               294 drivers/xen/xlate_mmu.c 	return apply_to_page_range(vma->vm_mm, addr, len, remap_pfn_fn, &r);
vma               492 fs/9p/vfs_file.c v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
vma               497 fs/9p/vfs_file.c 	retval = generic_file_mmap(filp, vma);
vma               499 fs/9p/vfs_file.c 		vma->vm_ops = &v9fs_file_vm_ops;
vma               505 fs/9p/vfs_file.c v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
vma               516 fs/9p/vfs_file.c 	    (vma->vm_flags & VM_SHARED) &&
vma               517 fs/9p/vfs_file.c 	    (vma->vm_flags & VM_WRITE)) {
vma               535 fs/9p/vfs_file.c 	retval = generic_file_mmap(filp, vma);
vma               537 fs/9p/vfs_file.c 		vma->vm_ops = &v9fs_mmap_file_vm_ops;
vma               547 fs/9p/vfs_file.c 	struct file *filp = vmf->vma->vm_file;
vma               605 fs/9p/vfs_file.c static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
vma               612 fs/9p/vfs_file.c 		.range_start = vma->vm_pgoff * PAGE_SIZE,
vma               614 fs/9p/vfs_file.c 		.range_end = vma->vm_pgoff * PAGE_SIZE +
vma               615 fs/9p/vfs_file.c 			(vma->vm_end - vma->vm_start - 1),
vma               618 fs/9p/vfs_file.c 	if (!(vma->vm_flags & VM_SHARED))
vma               621 fs/9p/vfs_file.c 	p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
vma               623 fs/9p/vfs_file.c 	inode = file_inode(vma->vm_file);
vma                19 fs/afs/file.c  static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
vma               676 fs/afs/file.c  static int afs_file_mmap(struct file *file, struct vm_area_struct *vma)
vma               680 fs/afs/file.c  	ret = generic_file_mmap(file, vma);
vma               682 fs/afs/file.c  		vma->vm_ops = &afs_vm_ops;
vma               778 fs/afs/write.c 	struct file *file = vmf->vma->vm_file;
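
v9fs, afs and (below) cifs all wrap generic_file_mmap(): the generic code sets up the page-cache mapping, then the filesystem swaps in its own vm_ops so .fault and .page_mkwrite go through filesystem-specific paths. A sketch, with my_fs_vm_ops assumed to be filled in elsewhere:

        #include <linux/fs.h>
        #include <linux/mm.h>

        static const struct vm_operations_struct my_fs_vm_ops = {
                /* .fault / .page_mkwrite supplied by the filesystem */
        };

        static int my_fs_file_mmap(struct file *file, struct vm_area_struct *vma)
        {
                int ret = generic_file_mmap(file, vma);

                if (!ret)
                        vma->vm_ops = &my_fs_vm_ops;    /* override generic ops */
                return ret;
        }
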
vma               328 fs/aio.c       static int aio_ring_mremap(struct vm_area_struct *vma)
vma               330 fs/aio.c       	struct file *file = vma->vm_file;
vma               331 fs/aio.c       	struct mm_struct *mm = vma->vm_mm;
vma               344 fs/aio.c       				ctx->user_id = ctx->mmap_base = vma->vm_start;
vma               365 fs/aio.c       static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
vma               367 fs/aio.c       	vma->vm_flags |= VM_DONTEXPAND;
vma               368 fs/aio.c       	vma->vm_ops = &aio_ring_vm_ops;
vma               181 fs/binfmt_elf.c 	struct vm_area_struct *vma;
vma               302 fs/binfmt_elf.c 	vma = find_extend_vma(current->mm, bprm->p);
vma               303 fs/binfmt_elf.c 	if (!vma)
vma              1275 fs/binfmt_elf.c static bool always_dump_vma(struct vm_area_struct *vma)
vma              1278 fs/binfmt_elf.c 	if (vma == get_gate_vma(vma->vm_mm))
vma              1285 fs/binfmt_elf.c 	if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
vma              1292 fs/binfmt_elf.c 	if (arch_vma_name(vma))
vma              1301 fs/binfmt_elf.c static unsigned long vma_dump_size(struct vm_area_struct *vma,
vma              1307 fs/binfmt_elf.c 	if (always_dump_vma(vma))
vma              1310 fs/binfmt_elf.c 	if (vma->vm_flags & VM_DONTDUMP)
vma              1314 fs/binfmt_elf.c 	if (vma_is_dax(vma)) {
vma              1315 fs/binfmt_elf.c 		if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
vma              1317 fs/binfmt_elf.c 		if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
vma              1323 fs/binfmt_elf.c 	if (vma->vm_flags & VM_HUGETLB) {
vma              1324 fs/binfmt_elf.c 		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
vma              1326 fs/binfmt_elf.c 		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
vma              1332 fs/binfmt_elf.c 	if (vma->vm_flags & VM_IO)
vma              1336 fs/binfmt_elf.c 	if (vma->vm_flags & VM_SHARED) {
vma              1337 fs/binfmt_elf.c 		if (file_inode(vma->vm_file)->i_nlink == 0 ?
vma              1344 fs/binfmt_elf.c 	if (vma->anon_vma && FILTER(ANON_PRIVATE))
vma              1346 fs/binfmt_elf.c 	if (vma->vm_file == NULL)
vma              1358 fs/binfmt_elf.c 	    vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
vma              1359 fs/binfmt_elf.c 		u32 __user *header = (u32 __user *) vma->vm_start;
vma              1391 fs/binfmt_elf.c 	return vma->vm_end - vma->vm_start;
vma              1583 fs/binfmt_elf.c 	struct vm_area_struct *vma;
vma              1608 fs/binfmt_elf.c 	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
vma              1612 fs/binfmt_elf.c 		file = vma->vm_file;
vma              1632 fs/binfmt_elf.c 		*start_end_ofs++ = vma->vm_start;
vma              1633 fs/binfmt_elf.c 		*start_end_ofs++ = vma->vm_end;
vma              1634 fs/binfmt_elf.c 		*start_end_ofs++ = vma->vm_pgoff;
vma              2190 fs/binfmt_elf.c 	struct vm_area_struct *vma, *gate_vma;
vma              2273 fs/binfmt_elf.c 	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
vma              2274 fs/binfmt_elf.c 			vma = next_vma(vma, gate_vma)) {
vma              2277 fs/binfmt_elf.c 		dump_size = vma_dump_size(vma, cprm->mm_flags);
vma              2302 fs/binfmt_elf.c 	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
vma              2303 fs/binfmt_elf.c 			vma = next_vma(vma, gate_vma)) {
vma              2308 fs/binfmt_elf.c 		phdr.p_vaddr = vma->vm_start;
vma              2311 fs/binfmt_elf.c 		phdr.p_memsz = vma->vm_end - vma->vm_start;
vma              2313 fs/binfmt_elf.c 		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
vma              2314 fs/binfmt_elf.c 		if (vma->vm_flags & VM_WRITE)
vma              2316 fs/binfmt_elf.c 		if (vma->vm_flags & VM_EXEC)
vma              2338 fs/binfmt_elf.c 	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
vma              2339 fs/binfmt_elf.c 			vma = next_vma(vma, gate_vma)) {
vma              2343 fs/binfmt_elf.c 		end = vma->vm_start + vma_filesz[i++];
vma              2345 fs/binfmt_elf.c 		for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
vma              1193 fs/binfmt_elf_fdpic.c static int maydump(struct vm_area_struct *vma, unsigned long mm_flags)
vma              1198 fs/binfmt_elf_fdpic.c 	if (vma->vm_flags & VM_IO) {
vma              1199 fs/binfmt_elf_fdpic.c 		kdcore("%08lx: %08lx: no (IO)", vma->vm_start, vma->vm_flags);
vma              1206 fs/binfmt_elf_fdpic.c 	if (!(vma->vm_flags & VM_READ)) {
vma              1207 fs/binfmt_elf_fdpic.c 		kdcore("%08lx: %08lx: no (!read)", vma->vm_start, vma->vm_flags);
vma              1212 fs/binfmt_elf_fdpic.c 	if (vma_is_dax(vma)) {
vma              1213 fs/binfmt_elf_fdpic.c 		if (vma->vm_flags & VM_SHARED) {
vma              1215 fs/binfmt_elf_fdpic.c 			kdcore("%08lx: %08lx: %s (DAX shared)", vma->vm_start,
vma              1216 fs/binfmt_elf_fdpic.c 			       vma->vm_flags, dump_ok ? "yes" : "no");
vma              1219 fs/binfmt_elf_fdpic.c 			kdcore("%08lx: %08lx: %s (DAX private)", vma->vm_start,
vma              1220 fs/binfmt_elf_fdpic.c 			       vma->vm_flags, dump_ok ? "yes" : "no");
vma              1226 fs/binfmt_elf_fdpic.c 	if (vma->vm_flags & VM_SHARED) {
vma              1227 fs/binfmt_elf_fdpic.c 		if (file_inode(vma->vm_file)->i_nlink == 0) {
vma              1229 fs/binfmt_elf_fdpic.c 			kdcore("%08lx: %08lx: %s (share)", vma->vm_start,
vma              1230 fs/binfmt_elf_fdpic.c 			       vma->vm_flags, dump_ok ? "yes" : "no");
vma              1235 fs/binfmt_elf_fdpic.c 		kdcore("%08lx: %08lx: %s (share)", vma->vm_start,
vma              1236 fs/binfmt_elf_fdpic.c 		       vma->vm_flags, dump_ok ? "yes" : "no");
vma              1242 fs/binfmt_elf_fdpic.c 	if (!vma->anon_vma) {
vma              1244 fs/binfmt_elf_fdpic.c 		kdcore("%08lx: %08lx: %s (!anon)", vma->vm_start,
vma              1245 fs/binfmt_elf_fdpic.c 		       vma->vm_flags, dump_ok ? "yes" : "no");
vma              1251 fs/binfmt_elf_fdpic.c 	kdcore("%08lx: %08lx: %s", vma->vm_start, vma->vm_flags,
vma              1495 fs/binfmt_elf_fdpic.c 	struct vm_area_struct *vma;
vma              1497 fs/binfmt_elf_fdpic.c 	for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
vma              1502 fs/binfmt_elf_fdpic.c 		if (!maydump(vma, cprm->mm_flags))
vma              1506 fs/binfmt_elf_fdpic.c 		for (addr = vma->vm_start; addr < vma->vm_end;
vma              1522 fs/binfmt_elf_fdpic.c 		if (!dump_emit(cprm, (void *) vma->vm_start,
vma              1523 fs/binfmt_elf_fdpic.c 				vma->vm_end - vma->vm_start))
vma              1532 fs/binfmt_elf_fdpic.c 	struct vm_area_struct *vma;
vma              1535 fs/binfmt_elf_fdpic.c 	for (vma = current->mm->mmap; vma; vma = vma->vm_next)
vma              1536 fs/binfmt_elf_fdpic.c 		if (maydump(vma, mm_flags))
vma              1537 fs/binfmt_elf_fdpic.c 			size += vma->vm_end - vma->vm_start;
vma              1555 fs/binfmt_elf_fdpic.c 	struct vm_area_struct *vma;
vma              1727 fs/binfmt_elf_fdpic.c 	for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
vma              1731 fs/binfmt_elf_fdpic.c 		sz = vma->vm_end - vma->vm_start;
vma              1735 fs/binfmt_elf_fdpic.c 		phdr.p_vaddr = vma->vm_start;
vma              1737 fs/binfmt_elf_fdpic.c 		phdr.p_filesz = maydump(vma, cprm->mm_flags) ? sz : 0;
vma              1740 fs/binfmt_elf_fdpic.c 		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
vma              1741 fs/binfmt_elf_fdpic.c 		if (vma->vm_flags & VM_WRITE)
vma              1743 fs/binfmt_elf_fdpic.c 		if (vma->vm_flags & VM_EXEC)
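
The fdpic maydump() hits above gate each VMA on its vm_flags and on the MMF_DUMP_* filter bits carried in mm_flags. A simplified sketch of that policy, assuming kernel context; the DAX and deleted-shared-inode (i_nlink) special cases visible above are deliberately omitted here.

#include <linux/mm.h>
#include <linux/sched/coredump.h>

static bool may_dump_vma(struct vm_area_struct *vma, unsigned long mm_flags)
{
	if (vma->vm_flags & VM_IO)		/* device mapping: never dump */
		return false;
	if (!(vma->vm_flags & VM_READ))		/* unreadable: nothing to copy */
		return false;
	if (vma->vm_flags & VM_SHARED)		/* shared: honour the shared filter bit */
		return !!(mm_flags & (1UL << MMF_DUMP_MAPPED_SHARED));
	if (!vma->anon_vma)			/* file-backed, never written to */
		return !!(mm_flags & (1UL << MMF_DUMP_MAPPED_PRIVATE));
	return !!(mm_flags & (1UL << MMF_DUMP_ANON_PRIVATE));
}
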
vma              2247 fs/btrfs/file.c static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
vma              2255 fs/btrfs/file.c 	vma->vm_ops = &btrfs_file_vm_ops;
vma              9049 fs/btrfs/inode.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vma              9084 fs/btrfs/inode.c 		ret2 = file_update_time(vmf->vma->vm_file);
vma              2467 fs/buffer.c    int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
vma              2471 fs/buffer.c    	struct inode *inode = file_inode(vma->vm_file);
vma              1425 fs/ceph/addr.c 	struct vm_area_struct *vma = vmf->vma;
vma              1426 fs/ceph/addr.c 	struct inode *inode = file_inode(vma->vm_file);
vma              1428 fs/ceph/addr.c 	struct ceph_file_info *fi = vma->vm_file->private_data;
vma              1445 fs/ceph/addr.c 	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1,
vma              1517 fs/ceph/addr.c 	struct vm_area_struct *vma = vmf->vma;
vma              1518 fs/ceph/addr.c 	struct inode *inode = file_inode(vma->vm_file);
vma              1520 fs/ceph/addr.c 	struct ceph_file_info *fi = vma->vm_file->private_data;
vma              1543 fs/ceph/addr.c 		err = ceph_uninline_data(vma->vm_file, locked_page);
vma              1563 fs/ceph/addr.c 	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len,
vma              1572 fs/ceph/addr.c 	file_update_time(vma->vm_file);
vma              1584 fs/ceph/addr.c 		err = ceph_update_writeable_page(vma->vm_file, off, len, page);
vma              1798 fs/ceph/addr.c int ceph_mmap(struct file *file, struct vm_area_struct *vma)
vma              1805 fs/ceph/addr.c 	vma->vm_ops = &ceph_vmops;
vma              1087 fs/ceph/super.h extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
vma              4054 fs/cifs/file.c int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
vma              4064 fs/cifs/file.c 		rc = generic_file_mmap(file, vma);
vma              4066 fs/cifs/file.c 		vma->vm_ops = &cifs_file_vm_ops;
vma              4072 fs/cifs/file.c int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
vma              4083 fs/cifs/file.c 		rc = generic_file_mmap(file, vma);
vma              4085 fs/cifs/file.c 		vma->vm_ops = &cifs_file_vm_ops;
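
The btrfs, ceph and cifs hits above (and the ext2/ext4/f2fs/fuse/gfs2/nfs/nilfs2/ubifs ones further down) all follow one idiom: run generic_file_mmap() for the common checks, then point vma->vm_ops at a filesystem-specific table whose .page_mkwrite hook catches the first write to each page. A minimal sketch under hypothetical "myfs" names; a real hook would also reserve blocks and revalidate page->mapping after locking.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	lock_page(vmf->page);		/* mkwrite must return with the page locked */
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= myfs_page_mkwrite,
};

static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc = generic_file_mmap(file, vma);	/* generic checks + default vm_ops */

	if (rc == 0)
		vma->vm_ops = &myfs_file_vm_ops; /* override to catch first writes */
	return rc;
}
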
vma                96 fs/coda/file.c coda_vm_open(struct vm_area_struct *vma)
vma                99 fs/coda/file.c 		container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);
vma               104 fs/coda/file.c 		cvm_ops->host_vm_ops->open(vma);
vma               108 fs/coda/file.c coda_vm_close(struct vm_area_struct *vma)
vma               111 fs/coda/file.c 		container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);
vma               114 fs/coda/file.c 		cvm_ops->host_vm_ops->close(vma);
vma               117 fs/coda/file.c 		vma->vm_ops = cvm_ops->host_vm_ops;
vma               124 fs/coda/file.c coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
vma               139 fs/coda/file.c 	if (WARN_ON(coda_file != vma->vm_file))
vma               142 fs/coda/file.c 	count = vma->vm_end - vma->vm_start;
vma               143 fs/coda/file.c 	ppos = vma->vm_pgoff * PAGE_SIZE;
vma               174 fs/coda/file.c 	vma->vm_file = get_file(host_file);
vma               175 fs/coda/file.c 	ret = call_mmap(vma->vm_file, vma);
vma               185 fs/coda/file.c 		cvm_ops->host_vm_ops = vma->vm_ops;
vma               186 fs/coda/file.c 		if (vma->vm_ops)
vma               187 fs/coda/file.c 			cvm_ops->vm_ops = *vma->vm_ops;
vma               194 fs/coda/file.c 		vma->vm_ops = &cvm_ops->vm_ops;
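
coda_file_mmap() above (like ovl_mmap() further down) serves the mapping from a backing "host" file: it swaps vma->vm_file to the host file, calls call_mmap(), and balances the file references depending on the outcome. A sketch of that reference dance, assuming the backing file is passed in; looking it up from the upper file is filesystem-specific and elided.

#include <linux/fs.h>
#include <linux/mm.h>

static int stackfs_mmap_backing(struct file *file, struct file *host,
				struct vm_area_struct *vma)
{
	int ret;

	if (WARN_ON(file != vma->vm_file))
		return -EIO;

	vma->vm_file = get_file(host);		/* the mapping now pins the host file */
	ret = call_mmap(vma->vm_file, vma);	/* host fs installs its own vm_ops */
	if (ret)
		fput(vma->vm_file);		/* drop the new reference on failure */
	else
		fput(file);			/* drop the reference vm_file used to hold */
	return ret;
}
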
vma               355 fs/cramfs/inode.c static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
vma               360 fs/cramfs/inode.c 	unsigned long address, pgoff = vma->vm_pgoff;
vma               364 fs/cramfs/inode.c 	ret = generic_file_readonly_mmap(file, vma);
vma               375 fs/cramfs/inode.c 	if (vma->vm_flags & VM_WRITE)
vma               382 fs/cramfs/inode.c 	pages = min(vma_pages(vma), max_pages - pgoff);
vma               405 fs/cramfs/inode.c 	if (pages == vma_pages(vma)) {
vma               412 fs/cramfs/inode.c 		ret = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
vma               413 fs/cramfs/inode.c 				      pages * PAGE_SIZE, vma->vm_page_prot);
vma               421 fs/cramfs/inode.c 		vma->vm_flags |= VM_MIXEDMAP;
vma               426 fs/cramfs/inode.c 			vmf = vmf_insert_mixed(vma, vma->vm_start + off, pfn);
vma               436 fs/cramfs/inode.c 			 address, pages, vma_pages(vma), vma->vm_start,
vma               437 fs/cramfs/inode.c 			 (unsigned long long)pgprot_val(vma->vm_page_prot));
vma               449 fs/cramfs/inode.c static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
vma               451 fs/cramfs/inode.c 	return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -ENOSYS;
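
cramfs_physmem_mmap() above maps uncompressed file data straight from physical memory: when the request covers every page of the VMA it uses a single remap_pfn_range(), otherwise it marks the VMA VM_MIXEDMAP and inserts pages one by one with vmf_insert_mixed(). A sketch of the whole-VMA case only, assuming kernel context.

#include <linux/mm.h>

static int map_physmem_whole(struct vm_area_struct *vma,
			     unsigned long address, unsigned long pages)
{
	if (pages != vma_pages(vma))
		return -EINVAL;		/* partial case: VM_MIXEDMAP + vmf_insert_mixed() */

	return remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
			       pages * PAGE_SIZE, vma->vm_page_prot);
}
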
vma               332 fs/dax.c       		struct vm_area_struct *vma, unsigned long address)
vma               340 fs/dax.c       	index = linear_page_index(vma, address & ~(size - 1));
vma               741 fs/dax.c       		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
vma               766 fs/dax.c       unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
vma               770 fs/dax.c       	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
vma               771 fs/dax.c       	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
vma               779 fs/dax.c       	struct vm_area_struct *vma;
vma               785 fs/dax.c       	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
vma               791 fs/dax.c       		if (!(vma->vm_flags & VM_SHARED))
vma               794 fs/dax.c       		address = pgoff_address(index, vma);
vma               801 fs/dax.c       		if (follow_pte_pmd(vma->vm_mm, address, &range,
vma               821 fs/dax.c       			flush_cache_page(vma, address, pfn);
vma               822 fs/dax.c       			pmd = pmdp_invalidate(vma, address, pmdp);
vma               825 fs/dax.c       			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
vma               835 fs/dax.c       			flush_cache_page(vma, address, pfn);
vma               836 fs/dax.c       			pte = ptep_clear_flush(vma, address, ptep);
vma               839 fs/dax.c       			set_pte_at(vma->vm_mm, address, ptep, pte);
vma              1042 fs/dax.c       	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
vma              1239 fs/dax.c       		struct vm_area_struct *vma, struct iomap *iomap)
vma              1241 fs/dax.c       	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
vma              1248 fs/dax.c       	struct vm_area_struct *vma = vmf->vma;
vma              1249 fs/dax.c       	struct address_space *mapping = vma->vm_file->f_mapping;
vma              1339 fs/dax.c       	sync = dax_fault_is_synchronous(flags, vma, &iomap);
vma              1345 fs/dax.c       			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
vma              1372 fs/dax.c       			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
vma              1374 fs/dax.c       			ret = vmf_insert_mixed(vma, vaddr, pfn);
vma              1417 fs/dax.c       	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
vma              1419 fs/dax.c       	struct vm_area_struct *vma = vmf->vma;
vma              1427 fs/dax.c       	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
vma              1437 fs/dax.c       		pgtable = pte_alloc_one(vma->vm_mm);
vma              1442 fs/dax.c       	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
vma              1449 fs/dax.c       		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
vma              1450 fs/dax.c       		mm_inc_nr_ptes(vma->vm_mm);
vma              1452 fs/dax.c       	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
vma              1454 fs/dax.c       	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
vma              1461 fs/dax.c       		pte_free(vma->vm_mm, pgtable);
vma              1469 fs/dax.c       	struct vm_area_struct *vma = vmf->vma;
vma              1470 fs/dax.c       	struct address_space *mapping = vma->vm_file->f_mapping;
vma              1505 fs/dax.c       	if (write && !(vma->vm_flags & VM_SHARED))
vma              1509 fs/dax.c       	if (pmd_addr < vma->vm_start)
vma              1511 fs/dax.c       	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
vma              1560 fs/dax.c       	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);
vma              1618 fs/dax.c       		split_huge_pmd(vma, vmf->pmd, vmf->address);
vma              1672 fs/dax.c       	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
vma              1692 fs/dax.c       		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
vma              1722 fs/dax.c       	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
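
The fs/dax.c writeback path above has to translate a file page offset back into a user address inside each shared VMA; the pgoff_address() hit shows the standard linear-mapping identity. A sketch of just that conversion, valid only for linear (non-remapped) file mappings.

#include <linux/mm.h>

static unsigned long vma_address_of_pgoff(struct vm_area_struct *vma,
					  pgoff_t pgoff)
{
	unsigned long address;

	/* linear mapping: the user address grows with pgoff in PAGE_SIZE steps */
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}
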
vma               169 fs/ecryptfs/file.c static int ecryptfs_mmap(struct file *file, struct vm_area_struct *vma)
vma               179 fs/ecryptfs/file.c 	return generic_file_mmap(file, vma);
vma               204 fs/exec.c      		ret = expand_downwards(bprm->vma, pos);
vma               223 fs/exec.c      		acct_arg_size(bprm, vma_pages(bprm->vma));
vma               240 fs/exec.c      	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
vma               246 fs/exec.c      	struct vm_area_struct *vma = NULL;
vma               249 fs/exec.c      	bprm->vma = vma = vm_area_alloc(mm);
vma               250 fs/exec.c      	if (!vma)
vma               252 fs/exec.c      	vma_set_anonymous(vma);
vma               266 fs/exec.c      	vma->vm_end = STACK_TOP_MAX;
vma               267 fs/exec.c      	vma->vm_start = vma->vm_end - PAGE_SIZE;
vma               268 fs/exec.c      	vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
vma               269 fs/exec.c      	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma               271 fs/exec.c      	err = insert_vm_struct(mm, vma);
vma               276 fs/exec.c      	arch_bprm_mm_init(mm, vma);
vma               278 fs/exec.c      	bprm->p = vma->vm_end - sizeof(void *);
vma               283 fs/exec.c      	bprm->vma = NULL;
vma               284 fs/exec.c      	vm_area_free(vma);
vma               626 fs/exec.c      static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
vma               628 fs/exec.c      	struct mm_struct *mm = vma->vm_mm;
vma               629 fs/exec.c      	unsigned long old_start = vma->vm_start;
vma               630 fs/exec.c      	unsigned long old_end = vma->vm_end;
vma               642 fs/exec.c      	if (vma != find_vma(mm, new_start))
vma               648 fs/exec.c      	if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
vma               655 fs/exec.c      	if (length != move_page_tables(vma, old_start,
vma               656 fs/exec.c      				       vma, new_start, length, false))
vma               666 fs/exec.c      			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
vma               675 fs/exec.c      			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
vma               682 fs/exec.c      	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
vma               698 fs/exec.c      	struct vm_area_struct *vma = bprm->vma;
vma               716 fs/exec.c      	if (vma->vm_end - vma->vm_start > stack_base)
vma               721 fs/exec.c      	stack_shift = vma->vm_start - stack_base;
vma               723 fs/exec.c      	bprm->p = vma->vm_end - stack_shift;
vma               729 fs/exec.c      	    unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
vma               732 fs/exec.c      	stack_shift = vma->vm_end - stack_top;
vma               759 fs/exec.c      	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
vma               763 fs/exec.c      	BUG_ON(prev != vma);
vma               767 fs/exec.c      		ret = shift_arg_pages(vma, stack_shift);
vma               773 fs/exec.c      	vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
vma               776 fs/exec.c      	stack_size = vma->vm_end - vma->vm_start;
vma               784 fs/exec.c      		stack_base = vma->vm_start + rlim_stack;
vma               786 fs/exec.c      		stack_base = vma->vm_end + stack_expand;
vma               789 fs/exec.c      		stack_base = vma->vm_end - rlim_stack;
vma               791 fs/exec.c      		stack_base = vma->vm_start - stack_expand;
vma               794 fs/exec.c      	ret = expand_stack(vma, stack_base);
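
The fs/exec.c hits above show __bprm_mm_init() parking a single page of stack at the highest possible address (STACK_TOP_MAX); setup_arg_pages() later slides the whole VMA into its final place with shift_arg_pages(). A sketch of the temporary-stack creation, assuming the caller holds mm->mmap_sem for writing as the real code does; locking and accounting are elided.

#include <linux/mm.h>

static struct vm_area_struct *alloc_temp_stack(struct mm_struct *mm)
{
	struct vm_area_struct *vma = vm_area_alloc(mm);

	if (!vma)
		return NULL;

	vma_set_anonymous(vma);
	vma->vm_end = STACK_TOP_MAX;			/* parked at the very top */
	vma->vm_start = vma->vm_end - PAGE_SIZE;	/* just one page for now */
	vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (insert_vm_struct(mm, vma)) {
		vm_area_free(vma);
		return NULL;
	}
	return vma;
}
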
vma                93 fs/ext2/file.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vma                99 fs/ext2/file.c 		file_update_time(vmf->vma->vm_file);
vma               122 fs/ext2/file.c static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)
vma               125 fs/ext2/file.c 		return generic_file_mmap(file, vma);
vma               128 fs/ext2/file.c 	vma->vm_ops = &ext2_dax_vm_ops;
vma               298 fs/ext4/file.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vma               313 fs/ext4/file.c 		(vmf->vma->vm_flags & VM_SHARED);
vma               318 fs/ext4/file.c 		file_update_time(vmf->vma->vm_file);
vma               371 fs/ext4/file.c static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
vma               384 fs/ext4/file.c 	if (!daxdev_mapping_supported(vma, dax_dev))
vma               389 fs/ext4/file.c 		vma->vm_ops = &ext4_dax_vm_ops;
vma               390 fs/ext4/file.c 		vma->vm_flags |= VM_HUGEPAGE;
vma               392 fs/ext4/file.c 		vma->vm_ops = &ext4_file_vm_ops;
vma              6239 fs/ext4/inode.c 	struct vm_area_struct *vma = vmf->vma;
vma              6245 fs/ext4/inode.c 	struct file *file = vma->vm_file;
vma              6256 fs/ext4/inode.c 	file_update_time(vma->vm_file);
vma              6269 fs/ext4/inode.c 			err = block_page_mkwrite(vma, vmf,
vma              6316 fs/ext4/inode.c 	err = block_page_mkwrite(vma, vmf, get_block);
vma              6340 fs/ext4/inode.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vma                36 fs/f2fs/file.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vma                51 fs/f2fs/file.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vma                73 fs/f2fs/file.c 	file_update_time(vmf->vma->vm_file);
vma               481 fs/f2fs/file.c static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
vma               495 fs/f2fs/file.c 	vma->vm_ops = &f2fs_file_vm_ops;
vma              2256 fs/fuse/file.c static void fuse_vma_close(struct vm_area_struct *vma)
vma              2258 fs/fuse/file.c 	filemap_write_and_wait(vma->vm_file->f_mapping);
vma              2279 fs/fuse/file.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vma              2281 fs/fuse/file.c 	file_update_time(vmf->vma->vm_file);
vma              2299 fs/fuse/file.c static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
vma              2305 fs/fuse/file.c 		if (vma->vm_flags & VM_MAYSHARE)
vma              2310 fs/fuse/file.c 		return generic_file_mmap(file, vma);
vma              2313 fs/fuse/file.c 	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
vma              2317 fs/fuse/file.c 	vma->vm_ops = &fuse_file_vm_ops;
vma               448 fs/gfs2/file.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vma               465 fs/gfs2/file.c 	gfs2_size_hint(vmf->vma->vm_file, pos, PAGE_SIZE);
vma               473 fs/gfs2/file.c 	file_update_time(vmf->vma->vm_file);
vma               572 fs/gfs2/file.c static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
vma               589 fs/gfs2/file.c 	vma->vm_ops = &gfs2_vm_ops;
vma                93 fs/hugetlbfs/inode.c static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
vma                96 fs/hugetlbfs/inode.c 	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
vma               100 fs/hugetlbfs/inode.c static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
vma               102 fs/hugetlbfs/inode.c 	mpol_cond_put(vma->vm_policy);
vma               105 fs/hugetlbfs/inode.c static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
vma               110 fs/hugetlbfs/inode.c static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
vma               135 fs/hugetlbfs/inode.c static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
vma               150 fs/hugetlbfs/inode.c 	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
vma               151 fs/hugetlbfs/inode.c 	vma->vm_ops = &hugetlb_vm_ops;
vma               160 fs/hugetlbfs/inode.c 		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
vma               165 fs/hugetlbfs/inode.c 	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
vma               168 fs/hugetlbfs/inode.c 	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
vma               169 fs/hugetlbfs/inode.c 	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
vma               179 fs/hugetlbfs/inode.c 				vma->vm_pgoff >> huge_page_order(h),
vma               180 fs/hugetlbfs/inode.c 				len >> huge_page_shift(h), vma,
vma               181 fs/hugetlbfs/inode.c 				vma->vm_flags))
vma               185 fs/hugetlbfs/inode.c 	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
vma               203 fs/hugetlbfs/inode.c 	struct vm_area_struct *vma;
vma               220 fs/hugetlbfs/inode.c 		vma = find_vma(mm, addr);
vma               222 fs/hugetlbfs/inode.c 		    (!vma || addr + len <= vm_start_gap(vma)))
vma               358 fs/hugetlbfs/inode.c 	struct vm_area_struct *vma;
vma               364 fs/hugetlbfs/inode.c 	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
vma               374 fs/hugetlbfs/inode.c 		if (vma->vm_pgoff < start)
vma               375 fs/hugetlbfs/inode.c 			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
vma               380 fs/hugetlbfs/inode.c 			v_end = vma->vm_end;
vma               382 fs/hugetlbfs/inode.c 			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
vma               383 fs/hugetlbfs/inode.c 							+ vma->vm_start;
vma               384 fs/hugetlbfs/inode.c 			if (v_end > vma->vm_end)
vma               385 fs/hugetlbfs/inode.c 				v_end = vma->vm_end;
vma               388 fs/hugetlbfs/inode.c 		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
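
hugetlbfs_file_mmap() above rejects any mapping whose file offset is not huge-page aligned; since vm_pgoff counts PAGE_SIZE units, the low bits up to the huge page order must be clear. A one-function sketch of that check.

#include <linux/hugetlb.h>
#include <linux/mm.h>

static bool hugetlb_pgoff_aligned(struct vm_area_struct *vma, struct hstate *h)
{
	/* bits of vm_pgoff below the huge page size must all be zero */
	return !(vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT));
}
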
vma              3534 fs/io_uring.c  				struct vm_area_struct *vma = vmas[j];
vma              3536 fs/io_uring.c  				if (vma->vm_file &&
vma              3537 fs/io_uring.c  				    !is_file_hugepages(vma->vm_file)) {
vma              3700 fs/io_uring.c  static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
vma              3702 fs/io_uring.c  	loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT;
vma              3703 fs/io_uring.c  	unsigned long sz = vma->vm_end - vma->vm_start;
vma              3726 fs/io_uring.c  	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
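
io_uring_mmap() above decodes the mmap offset into which ring region the caller wants, then hands the backing pages to userspace with a single remap_pfn_range(). A reduced sketch of the arithmetic only: the IORING_OFF_* region selection is elided and "ring" stands in for the kernel allocation being exposed.

#include <linux/mm.h>
#include <asm/io.h>

static int ring_mmap(struct vm_area_struct *vma, void *ring)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long sz = vma->vm_end - vma->vm_start;
	unsigned long pfn;

	(void)offset;	/* in io_uring this selects the SQ ring, CQ ring or SQE array */
	pfn = virt_to_phys(ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}
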
vma              1036 fs/iomap/buffered-io.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vma               333 fs/kernfs/file.c static void kernfs_vma_open(struct vm_area_struct *vma)
vma               335 fs/kernfs/file.c 	struct file *file = vma->vm_file;
vma               345 fs/kernfs/file.c 		of->vm_ops->open(vma);
vma               352 fs/kernfs/file.c 	struct file *file = vmf->vma->vm_file;
vma               372 fs/kernfs/file.c 	struct file *file = vmf->vma->vm_file;
vma               392 fs/kernfs/file.c static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
vma               395 fs/kernfs/file.c 	struct file *file = vma->vm_file;
vma               407 fs/kernfs/file.c 		ret = of->vm_ops->access(vma, addr, buf, len, write);
vma               414 fs/kernfs/file.c static int kernfs_vma_set_policy(struct vm_area_struct *vma,
vma               417 fs/kernfs/file.c 	struct file *file = vma->vm_file;
vma               429 fs/kernfs/file.c 		ret = of->vm_ops->set_policy(vma, new);
vma               435 fs/kernfs/file.c static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
vma               438 fs/kernfs/file.c 	struct file *file = vma->vm_file;
vma               443 fs/kernfs/file.c 		return vma->vm_policy;
vma               446 fs/kernfs/file.c 		return vma->vm_policy;
vma               448 fs/kernfs/file.c 	pol = vma->vm_policy;
vma               450 fs/kernfs/file.c 		pol = of->vm_ops->get_policy(vma, addr);
vma               469 fs/kernfs/file.c static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
vma               492 fs/kernfs/file.c 	rc = ops->mmap(of, vma);
vma               501 fs/kernfs/file.c 	if (vma->vm_file != file)
vma               505 fs/kernfs/file.c 	if (of->mmapped && of->vm_ops != vma->vm_ops)
vma               513 fs/kernfs/file.c 	if (vma->vm_ops && vma->vm_ops->close)
vma               518 fs/kernfs/file.c 	of->vm_ops = vma->vm_ops;
vma               519 fs/kernfs/file.c 	vma->vm_ops = &kernfs_vm_ops;
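
The fs/kernfs/file.c hits above show an interposition pattern: kernfs saves the vm_ops that the attribute's own ->mmap() installed, replaces them with kernfs_vm_ops, and has each wrapper forward when the saved table provides the hook. A sketch of the forwarding shape; note the assumption that the saved table is stashed in vm_private_data, whereas kernfs itself keeps it in its kernfs_open_file.

#include <linux/mm.h>

struct wrapped_map {
	const struct vm_operations_struct *real_ops;
};

static void wrap_vma_open(struct vm_area_struct *vma)
{
	/* assumption: stashed here at mmap time (kernfs uses of->vm_ops) */
	struct wrapped_map *wm = vma->vm_private_data;

	if (wm && wm->real_ops && wm->real_ops->open)
		wm->real_ops->open(vma);	/* forward only when provided */
}
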
vma               180 fs/nfs/file.c  nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
vma               190 fs/nfs/file.c  	status = generic_file_mmap(file, vma);
vma               192 fs/nfs/file.c  		vma->vm_ops = &nfs_file_vm_ops;
vma               535 fs/nfs/file.c  	struct file *filp = vmf->vma->vm_file;
vma                47 fs/nilfs2/file.c 	struct vm_area_struct *vma = vmf->vma;
vma                49 fs/nilfs2/file.c 	struct inode *inode = file_inode(vma->vm_file);
vma                98 fs/nilfs2/file.c 	file_update_time(vma->vm_file);
vma                99 fs/nilfs2/file.c 	ret = block_page_mkwrite(vma, vmf, nilfs_get_block);
vma               120 fs/nilfs2/file.c static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma)
vma               123 fs/nilfs2/file.c 	vma->vm_ops = &nilfs_file_vm_ops;
vma                35 fs/ocfs2/mmap.c 	struct vm_area_struct *vma = vmf->vma;
vma                43 fs/ocfs2/mmap.c 	trace_ocfs2_fault(OCFS2_I(vma->vm_file->f_mapping->host)->ip_blkno,
vma                44 fs/ocfs2/mmap.c 			  vma, vmf->page, vmf->pgoff);
vma               118 fs/ocfs2/mmap.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vma               146 fs/ocfs2/mmap.c 	ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, page);
vma               164 fs/ocfs2/mmap.c int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
vma               176 fs/ocfs2/mmap.c 	vma->vm_ops = &ocfs2_file_vm_ops;
vma                 5 fs/ocfs2/mmap.h int ocfs2_mmap(struct file *file, struct vm_area_struct *vma);
vma               438 fs/orangefs/file.c 	struct file *file = vmf->vma->vm_file;
vma               461 fs/orangefs/file.c static int orangefs_file_mmap(struct file *file, struct vm_area_struct *vma)
vma               476 fs/orangefs/file.c 	vma->vm_flags |= VM_SEQ_READ;
vma               477 fs/orangefs/file.c 	vma->vm_flags &= ~VM_RAND_READ;
vma               480 fs/orangefs/file.c 	vma->vm_ops = &orangefs_file_vm_ops;
vma               722 fs/orangefs/inode.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vma               775 fs/orangefs/inode.c 	file_update_time(vmf->vma->vm_file);
vma               316 fs/overlayfs/file.c static int ovl_mmap(struct file *file, struct vm_area_struct *vma)
vma               325 fs/overlayfs/file.c 	if (WARN_ON(file != vma->vm_file))
vma               328 fs/overlayfs/file.c 	vma->vm_file = get_file(realfile);
vma               331 fs/overlayfs/file.c 	ret = call_mmap(vma->vm_file, vma);
vma              2014 fs/proc/base.c 	struct vm_area_struct *vma;
vma              2038 fs/proc/base.c 	vma = find_exact_vma(mm, vm_start, vm_end);
vma              2039 fs/proc/base.c 	if (vma && vma->vm_file) {
vma              2040 fs/proc/base.c 		*path = vma->vm_file->f_path;
vma              2111 fs/proc/base.c 	struct vm_area_struct *vma;
vma              2138 fs/proc/base.c 	vma = find_exact_vma(mm, vm_start, vm_end);
vma              2139 fs/proc/base.c 	if (!vma)
vma              2142 fs/proc/base.c 	if (vma->vm_file)
vma              2144 fs/proc/base.c 				(void *)(unsigned long)vma->vm_file->f_mode);
vma              2165 fs/proc/base.c 	struct vm_area_struct *vma;
vma              2210 fs/proc/base.c 	for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) {
vma              2211 fs/proc/base.c 		if (!vma->vm_file)
vma              2224 fs/proc/base.c 		p->start = vma->vm_start;
vma              2225 fs/proc/base.c 		p->end = vma->vm_end;
vma              2226 fs/proc/base.c 		p->mode = vma->vm_file->f_mode;
vma               291 fs/proc/inode.c static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
vma               300 fs/proc/inode.c 			rv = mmap(file, vma);
vma               136 fs/proc/task_mmu.c m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
vma               138 fs/proc/task_mmu.c 	if (vma == priv->tail_vma)
vma               140 fs/proc/task_mmu.c 	return vma->vm_next ?: priv->tail_vma;
vma               143 fs/proc/task_mmu.c static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
vma               146 fs/proc/task_mmu.c 		m->version = m_next_vma(m->private, vma) ? vma->vm_end : -1UL;
vma               154 fs/proc/task_mmu.c 	struct vm_area_struct *vma;
vma               178 fs/proc/task_mmu.c 		vma = find_vma(mm, last_addr - 1);
vma               179 fs/proc/task_mmu.c 		if (vma && vma->vm_start <= last_addr)
vma               180 fs/proc/task_mmu.c 			vma = m_next_vma(priv, vma);
vma               181 fs/proc/task_mmu.c 		if (vma)
vma               182 fs/proc/task_mmu.c 			return vma;
vma               187 fs/proc/task_mmu.c 		for (vma = mm->mmap; pos; pos--) {
vma               188 fs/proc/task_mmu.c 			m->version = vma->vm_start;
vma               189 fs/proc/task_mmu.c 			vma = vma->vm_next;
vma               191 fs/proc/task_mmu.c 		return vma;
vma               268 fs/proc/task_mmu.c static int is_stack(struct vm_area_struct *vma)
vma               275 fs/proc/task_mmu.c 	return vma->vm_start <= vma->vm_mm->start_stack &&
vma               276 fs/proc/task_mmu.c 		vma->vm_end >= vma->vm_mm->start_stack;
vma               300 fs/proc/task_mmu.c show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
vma               302 fs/proc/task_mmu.c 	struct mm_struct *mm = vma->vm_mm;
vma               303 fs/proc/task_mmu.c 	struct file *file = vma->vm_file;
vma               304 fs/proc/task_mmu.c 	vm_flags_t flags = vma->vm_flags;
vma               312 fs/proc/task_mmu.c 		struct inode *inode = file_inode(vma->vm_file);
vma               315 fs/proc/task_mmu.c 		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
vma               318 fs/proc/task_mmu.c 	start = vma->vm_start;
vma               319 fs/proc/task_mmu.c 	end = vma->vm_end;
vma               332 fs/proc/task_mmu.c 	if (vma->vm_ops && vma->vm_ops->name) {
vma               333 fs/proc/task_mmu.c 		name = vma->vm_ops->name(vma);
vma               338 fs/proc/task_mmu.c 	name = arch_vma_name(vma);
vma               345 fs/proc/task_mmu.c 		if (vma->vm_start <= mm->brk &&
vma               346 fs/proc/task_mmu.c 		    vma->vm_end >= mm->start_brk) {
vma               351 fs/proc/task_mmu.c 		if (is_stack(vma))
vma               513 fs/proc/task_mmu.c 			walk->vma->vm_file->f_mapping, addr, end);
vma               525 fs/proc/task_mmu.c 	struct vm_area_struct *vma = walk->vma;
vma               526 fs/proc/task_mmu.c 	bool locked = !!(vma->vm_flags & VM_LOCKED);
vma               530 fs/proc/task_mmu.c 		page = vm_normal_page(vma, addr, *pte);
vma               553 fs/proc/task_mmu.c 		page = find_get_entry(vma->vm_file->f_mapping,
vma               554 fs/proc/task_mmu.c 						linear_page_index(vma, addr));
vma               577 fs/proc/task_mmu.c 	struct vm_area_struct *vma = walk->vma;
vma               578 fs/proc/task_mmu.c 	bool locked = !!(vma->vm_flags & VM_LOCKED);
vma               582 fs/proc/task_mmu.c 	page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
vma               605 fs/proc/task_mmu.c 	struct vm_area_struct *vma = walk->vma;
vma               609 fs/proc/task_mmu.c 	ptl = pmd_trans_huge_lock(pmd, vma);
vma               624 fs/proc/task_mmu.c 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
vma               633 fs/proc/task_mmu.c static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
vma               697 fs/proc/task_mmu.c 		if (vma->vm_flags & (1UL << i)) {
vma               712 fs/proc/task_mmu.c 	struct vm_area_struct *vma = walk->vma;
vma               716 fs/proc/task_mmu.c 		page = vm_normal_page(vma, addr, *pte);
vma               729 fs/proc/task_mmu.c 			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
vma               731 fs/proc/task_mmu.c 			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
vma               750 fs/proc/task_mmu.c static void smap_gather_stats(struct vm_area_struct *vma,
vma               756 fs/proc/task_mmu.c 	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
vma               767 fs/proc/task_mmu.c 		unsigned long shmem_swapped = shmem_swap_usage(vma);
vma               769 fs/proc/task_mmu.c 		if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
vma               770 fs/proc/task_mmu.c 					!(vma->vm_flags & VM_WRITE)) {
vma               774 fs/proc/task_mmu.c 			walk_page_vma(vma, &smaps_shmem_walk_ops, mss);
vma               780 fs/proc/task_mmu.c 	walk_page_vma(vma, &smaps_walk_ops, mss);
vma               827 fs/proc/task_mmu.c 	struct vm_area_struct *vma = v;
vma               832 fs/proc/task_mmu.c 	smap_gather_stats(vma, &mss);
vma               834 fs/proc/task_mmu.c 	show_map_vma(m, vma);
vma               836 fs/proc/task_mmu.c 	SEQ_PUT_DEC("Size:           ", vma->vm_end - vma->vm_start);
vma               837 fs/proc/task_mmu.c 	SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
vma               838 fs/proc/task_mmu.c 	SEQ_PUT_DEC(" kB\nMMUPageSize:    ", vma_mmu_pagesize(vma));
vma               844 fs/proc/task_mmu.c 		   transparent_hugepage_enabled(vma));
vma               847 fs/proc/task_mmu.c 		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
vma               848 fs/proc/task_mmu.c 	show_smap_vma_flags(m, vma);
vma               850 fs/proc/task_mmu.c 	m_cache_vma(m, vma);
vma               860 fs/proc/task_mmu.c 	struct vm_area_struct *vma;
vma               882 fs/proc/task_mmu.c 	for (vma = priv->mm->mmap; vma; vma = vma->vm_next) {
vma               883 fs/proc/task_mmu.c 		smap_gather_stats(vma, &mss);
vma               884 fs/proc/task_mmu.c 		last_vma_end = vma->vm_end;
vma               988 fs/proc/task_mmu.c static inline void clear_soft_dirty(struct vm_area_struct *vma,
vma              1002 fs/proc/task_mmu.c 		old_pte = ptep_modify_prot_start(vma, addr, pte);
vma              1005 fs/proc/task_mmu.c 		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
vma              1008 fs/proc/task_mmu.c 		set_pte_at(vma->vm_mm, addr, pte, ptent);
vma              1012 fs/proc/task_mmu.c static inline void clear_soft_dirty(struct vm_area_struct *vma,
vma              1019 fs/proc/task_mmu.c static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
vma              1026 fs/proc/task_mmu.c 		old = pmdp_invalidate(vma, addr, pmdp);
vma              1035 fs/proc/task_mmu.c 		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
vma              1038 fs/proc/task_mmu.c 		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
vma              1042 fs/proc/task_mmu.c static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
vma              1052 fs/proc/task_mmu.c 	struct vm_area_struct *vma = walk->vma;
vma              1057 fs/proc/task_mmu.c 	ptl = pmd_trans_huge_lock(pmd, vma);
vma              1060 fs/proc/task_mmu.c 			clear_soft_dirty_pmd(vma, addr, pmd);
vma              1070 fs/proc/task_mmu.c 		pmdp_test_and_clear_young(vma, addr, pmd);
vma              1081 fs/proc/task_mmu.c 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
vma              1086 fs/proc/task_mmu.c 			clear_soft_dirty(vma, addr, pte);
vma              1093 fs/proc/task_mmu.c 		page = vm_normal_page(vma, addr, ptent);
vma              1098 fs/proc/task_mmu.c 		ptep_test_and_clear_young(vma, addr, pte);
vma              1111 fs/proc/task_mmu.c 	struct vm_area_struct *vma = walk->vma;
vma              1113 fs/proc/task_mmu.c 	if (vma->vm_flags & VM_PFNMAP)
vma              1122 fs/proc/task_mmu.c 	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
vma              1124 fs/proc/task_mmu.c 	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
vma              1140 fs/proc/task_mmu.c 	struct vm_area_struct *vma;
vma              1189 fs/proc/task_mmu.c 			for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma              1190 fs/proc/task_mmu.c 				if (!(vma->vm_flags & VM_SOFTDIRTY))
vma              1215 fs/proc/task_mmu.c 				for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma              1216 fs/proc/task_mmu.c 					vma->vm_flags &= ~VM_SOFTDIRTY;
vma              1217 fs/proc/task_mmu.c 					vma_set_page_prot(vma);
vma              1292 fs/proc/task_mmu.c 		struct vm_area_struct *vma = find_vma(walk->mm, addr);
vma              1297 fs/proc/task_mmu.c 		if (vma)
vma              1298 fs/proc/task_mmu.c 			hole_end = min(end, vma->vm_start);
vma              1308 fs/proc/task_mmu.c 		if (!vma)
vma              1312 fs/proc/task_mmu.c 		if (vma->vm_flags & VM_SOFTDIRTY)
vma              1314 fs/proc/task_mmu.c 		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
vma              1325 fs/proc/task_mmu.c 		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
vma              1334 fs/proc/task_mmu.c 		page = vm_normal_page(vma, addr, pte);
vma              1357 fs/proc/task_mmu.c 	if (vma->vm_flags & VM_SOFTDIRTY)
vma              1366 fs/proc/task_mmu.c 	struct vm_area_struct *vma = walk->vma;
vma              1373 fs/proc/task_mmu.c 	ptl = pmd_trans_huge_lock(pmdp, vma);
vma              1379 fs/proc/task_mmu.c 		if (vma->vm_flags & VM_SOFTDIRTY)
vma              1443 fs/proc/task_mmu.c 		pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
vma              1462 fs/proc/task_mmu.c 	struct vm_area_struct *vma = walk->vma;
vma              1467 fs/proc/task_mmu.c 	if (vma->vm_flags & VM_SOFTDIRTY)
vma              1694 fs/proc/task_mmu.c static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
vma              1703 fs/proc/task_mmu.c 	page = vm_normal_page(vma, addr, pte);
vma              1719 fs/proc/task_mmu.c 					      struct vm_area_struct *vma,
vma              1728 fs/proc/task_mmu.c 	page = vm_normal_page_pmd(vma, addr, pmd);
vma              1747 fs/proc/task_mmu.c 	struct vm_area_struct *vma = walk->vma;
vma              1753 fs/proc/task_mmu.c 	ptl = pmd_trans_huge_lock(pmd, vma);
vma              1757 fs/proc/task_mmu.c 		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
vma              1770 fs/proc/task_mmu.c 		struct page *page = can_gather_numa_stats(*pte, vma, addr);
vma              1820 fs/proc/task_mmu.c 	struct vm_area_struct *vma = v;
vma              1822 fs/proc/task_mmu.c 	struct file *file = vma->vm_file;
vma              1823 fs/proc/task_mmu.c 	struct mm_struct *mm = vma->vm_mm;
vma              1834 fs/proc/task_mmu.c 	pol = __get_vma_policy(vma, vma->vm_start);
vma              1842 fs/proc/task_mmu.c 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
vma              1847 fs/proc/task_mmu.c 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
vma              1849 fs/proc/task_mmu.c 	} else if (is_stack(vma)) {
vma              1853 fs/proc/task_mmu.c 	if (is_vm_hugetlb_page(vma))
vma              1857 fs/proc/task_mmu.c 	walk_page_vma(vma, &show_numa_ops, md);
vma              1877 fs/proc/task_mmu.c 	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
vma              1887 fs/proc/task_mmu.c 	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
vma              1890 fs/proc/task_mmu.c 	m_cache_vma(m, vma);
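
show_map_vma() above labels anonymous regions by comparing the VMA against the mm's brk and stack bounds; the is_stack() hit is the stack half of that test. A sketch of the combined classification used for the [heap]/[stack] annotations in /proc/<pid>/maps.

#include <linux/mm_types.h>

static const char *classify_anon_vma(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;

	if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk)
		return "[heap]";	/* overlaps the brk region */
	if (vma->vm_start <= mm->start_stack && vma->vm_end >= mm->start_stack)
		return "[stack]";	/* contains the stack start */
	return NULL;
}
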
vma                23 fs/proc/task_nommu.c 	struct vm_area_struct *vma;
vma                30 fs/proc/task_nommu.c 		vma = rb_entry(p, struct vm_area_struct, vm_rb);
vma                32 fs/proc/task_nommu.c 		bytes += kobjsize(vma);
vma                34 fs/proc/task_nommu.c 		region = vma->vm_region;
vma                39 fs/proc/task_nommu.c 			size = vma->vm_end - vma->vm_start;
vma                43 fs/proc/task_nommu.c 		    vma->vm_flags & VM_MAYSHARE) {
vma                48 fs/proc/task_nommu.c 				slack = region->vm_end - vma->vm_end;
vma                85 fs/proc/task_nommu.c 	struct vm_area_struct *vma;
vma                91 fs/proc/task_nommu.c 		vma = rb_entry(p, struct vm_area_struct, vm_rb);
vma                92 fs/proc/task_nommu.c 		vsize += vma->vm_end - vma->vm_start;
vma               102 fs/proc/task_nommu.c 	struct vm_area_struct *vma;
vma               109 fs/proc/task_nommu.c 		vma = rb_entry(p, struct vm_area_struct, vm_rb);
vma               110 fs/proc/task_nommu.c 		size += kobjsize(vma);
vma               111 fs/proc/task_nommu.c 		region = vma->vm_region;
vma               129 fs/proc/task_nommu.c static int is_stack(struct vm_area_struct *vma)
vma               131 fs/proc/task_nommu.c 	struct mm_struct *mm = vma->vm_mm;
vma               138 fs/proc/task_nommu.c 	return vma->vm_start <= mm->start_stack &&
vma               139 fs/proc/task_nommu.c 		vma->vm_end >= mm->start_stack;
vma               145 fs/proc/task_nommu.c static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
vma               147 fs/proc/task_nommu.c 	struct mm_struct *mm = vma->vm_mm;
vma               154 fs/proc/task_nommu.c 	flags = vma->vm_flags;
vma               155 fs/proc/task_nommu.c 	file = vma->vm_file;
vma               158 fs/proc/task_nommu.c 		struct inode *inode = file_inode(vma->vm_file);
vma               161 fs/proc/task_nommu.c 		pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
vma               167 fs/proc/task_nommu.c 		   vma->vm_start,
vma               168 fs/proc/task_nommu.c 		   vma->vm_end,
vma               179 fs/proc/task_nommu.c 	} else if (mm && is_stack(vma)) {
vma               187 fs/proc/vmcore.c int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
vma               192 fs/proc/vmcore.c 	return remap_pfn_range(vma, from, pfn, size, prot);
vma               255 fs/proc/vmcore.c static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
vma               269 fs/proc/vmcore.c 			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
vma               415 fs/proc/vmcore.c 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
vma               487 fs/proc/vmcore.c static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
vma               509 fs/proc/vmcore.c 				if (remap_oldmem_pfn_range(vma, from + len,
vma               516 fs/proc/vmcore.c 			if (remap_oldmem_pfn_range(vma, from + len,
vma               527 fs/proc/vmcore.c 		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
vma               533 fs/proc/vmcore.c 	do_munmap(vma->vm_mm, from, len, NULL);
vma               537 fs/proc/vmcore.c static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
vma               546 fs/proc/vmcore.c 		return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
vma               548 fs/proc/vmcore.c 		return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
vma               551 fs/proc/vmcore.c static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
vma               553 fs/proc/vmcore.c 	size_t size = vma->vm_end - vma->vm_start;
vma               557 fs/proc/vmcore.c 	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
vma               563 fs/proc/vmcore.c 	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
vma               566 fs/proc/vmcore.c 	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
vma               567 fs/proc/vmcore.c 	vma->vm_flags |= VM_MIXEDMAP;
vma               568 fs/proc/vmcore.c 	vma->vm_ops = &vmcore_mmap_ops;
vma               577 fs/proc/vmcore.c 		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
vma               578 fs/proc/vmcore.c 				    vma->vm_page_prot))
vma               610 fs/proc/vmcore.c 			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
vma               627 fs/proc/vmcore.c 		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
vma               646 fs/proc/vmcore.c 			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
vma               648 fs/proc/vmcore.c 						    vma->vm_page_prot))
vma               661 fs/proc/vmcore.c 	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
vma               665 fs/proc/vmcore.c static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
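
mmap_vmcore() above only permits read-only mappings of the old kernel's memory: it rejects VM_WRITE and VM_EXEC up front and strips VM_MAYWRITE/VM_MAYEXEC so a later mprotect() cannot add the bits back. A sketch of just that permission clamp.

#include <linux/mm.h>

static int clamp_readonly(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;		/* old memory is read-only, never executable */

	/* strip VM_MAY* so mprotect() cannot re-enable write or exec */
	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	return 0;
}
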
vma                31 fs/ramfs/file-nommu.c static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);
vma               256 fs/ramfs/file-nommu.c static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma)
vma               258 fs/ramfs/file-nommu.c 	if (!(vma->vm_flags & (VM_SHARED | VM_MAYSHARE)))
vma               262 fs/ramfs/file-nommu.c 	vma->vm_ops = &generic_file_vm_ops;
vma                64 fs/romfs/mmap-nommu.c static int romfs_mmap(struct file *file, struct vm_area_struct *vma)
vma                66 fs/romfs/mmap-nommu.c 	return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -ENOSYS;
vma               164 fs/sysfs/file.c 			     struct vm_area_struct *vma)
vma               169 fs/sysfs/file.c 	return battr->mmap(of->file, kobj, battr, vma);
vma              1510 fs/ubifs/file.c 	struct inode *inode = file_inode(vmf->vma->vm_file);
vma              1602 fs/ubifs/file.c static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
vma              1606 fs/ubifs/file.c 	err = generic_file_mmap(file, vma);
vma              1609 fs/ubifs/file.c 	vma->vm_ops = &ubifs_file_vm_ops;
vma               228 fs/userfaultfd.c 					 struct vm_area_struct *vma,
vma               239 fs/userfaultfd.c 	ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
vma               260 fs/userfaultfd.c 					 struct vm_area_struct *vma,
vma               354 fs/userfaultfd.c 	struct mm_struct *mm = vmf->vma->vm_mm;
vma               381 fs/userfaultfd.c 	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
vma               485 fs/userfaultfd.c 	if (!is_vm_hugetlb_page(vmf->vma))
vma               489 fs/userfaultfd.c 		must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
vma               637 fs/userfaultfd.c 		struct vm_area_struct *vma;
vma               644 fs/userfaultfd.c 		for (vma = mm->mmap; vma; vma = vma->vm_next)
vma               645 fs/userfaultfd.c 			if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
vma               646 fs/userfaultfd.c 				vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
vma               647 fs/userfaultfd.c 				vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
vma               671 fs/userfaultfd.c int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
vma               676 fs/userfaultfd.c 	octx = vma->vm_userfaultfd_ctx.ctx;
vma               678 fs/userfaultfd.c 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
vma               679 fs/userfaultfd.c 		vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
vma               706 fs/userfaultfd.c 		ctx->mm = vma->vm_mm;
vma               716 fs/userfaultfd.c 	vma->vm_userfaultfd_ctx.ctx = ctx;
vma               744 fs/userfaultfd.c void mremap_userfaultfd_prep(struct vm_area_struct *vma,
vma               749 fs/userfaultfd.c 	ctx = vma->vm_userfaultfd_ctx.ctx;
vma               760 fs/userfaultfd.c 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
vma               761 fs/userfaultfd.c 		vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
vma               790 fs/userfaultfd.c bool userfaultfd_remove(struct vm_area_struct *vma,
vma               793 fs/userfaultfd.c 	struct mm_struct *mm = vma->vm_mm;
vma               797 fs/userfaultfd.c 	ctx = vma->vm_userfaultfd_ctx.ctx;
vma               829 fs/userfaultfd.c int userfaultfd_unmap_prep(struct vm_area_struct *vma,
vma               833 fs/userfaultfd.c 	for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
vma               835 fs/userfaultfd.c 		struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
vma               879 fs/userfaultfd.c 	struct vm_area_struct *vma, *prev;
vma               901 fs/userfaultfd.c 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma               903 fs/userfaultfd.c 		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
vma               904 fs/userfaultfd.c 		       !!(vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
vma               905 fs/userfaultfd.c 		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
vma               906 fs/userfaultfd.c 			prev = vma;
vma               909 fs/userfaultfd.c 		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
vma               911 fs/userfaultfd.c 			prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
vma               912 fs/userfaultfd.c 					 new_flags, vma->anon_vma,
vma               913 fs/userfaultfd.c 					 vma->vm_file, vma->vm_pgoff,
vma               914 fs/userfaultfd.c 					 vma_policy(vma),
vma               917 fs/userfaultfd.c 				vma = prev;
vma               919 fs/userfaultfd.c 				prev = vma;
vma               921 fs/userfaultfd.c 		vma->vm_flags = new_flags;
vma               922 fs/userfaultfd.c 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
vma              1296 fs/userfaultfd.c static inline bool vma_can_userfault(struct vm_area_struct *vma)
vma              1298 fs/userfaultfd.c 	return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
vma              1299 fs/userfaultfd.c 		vma_is_shmem(vma);
vma              1306 fs/userfaultfd.c 	struct vm_area_struct *vma, *prev, *cur;
vma              1356 fs/userfaultfd.c 	vma = find_vma_prev(mm, start, &prev);
vma              1357 fs/userfaultfd.c 	if (!vma)
vma              1362 fs/userfaultfd.c 	if (vma->vm_start >= end)
vma              1369 fs/userfaultfd.c 	if (is_vm_hugetlb_page(vma)) {
vma              1370 fs/userfaultfd.c 		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
vma              1381 fs/userfaultfd.c 	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
vma              1439 fs/userfaultfd.c 	if (vma->vm_start < start)
vma              1440 fs/userfaultfd.c 		prev = vma;
vma              1446 fs/userfaultfd.c 		BUG_ON(!vma_can_userfault(vma));
vma              1447 fs/userfaultfd.c 		BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
vma              1448 fs/userfaultfd.c 		       vma->vm_userfaultfd_ctx.ctx != ctx);
vma              1449 fs/userfaultfd.c 		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
vma              1455 fs/userfaultfd.c 		if (vma->vm_userfaultfd_ctx.ctx == ctx &&
vma              1456 fs/userfaultfd.c 		    (vma->vm_flags & vm_flags) == vm_flags)
vma              1459 fs/userfaultfd.c 		if (vma->vm_start > start)
vma              1460 fs/userfaultfd.c 			start = vma->vm_start;
vma              1461 fs/userfaultfd.c 		vma_end = min(end, vma->vm_end);
vma              1463 fs/userfaultfd.c 		new_flags = (vma->vm_flags & ~vm_flags) | vm_flags;
vma              1465 fs/userfaultfd.c 				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
vma              1466 fs/userfaultfd.c 				 vma_policy(vma),
vma              1469 fs/userfaultfd.c 			vma = prev;
vma              1472 fs/userfaultfd.c 		if (vma->vm_start < start) {
vma              1473 fs/userfaultfd.c 			ret = split_vma(mm, vma, start, 1);
vma              1477 fs/userfaultfd.c 		if (vma->vm_end > end) {
vma              1478 fs/userfaultfd.c 			ret = split_vma(mm, vma, end, 0);
vma              1488 fs/userfaultfd.c 		vma->vm_flags = new_flags;
vma              1489 fs/userfaultfd.c 		vma->vm_userfaultfd_ctx.ctx = ctx;
vma              1492 fs/userfaultfd.c 		prev = vma;
vma              1493 fs/userfaultfd.c 		start = vma->vm_end;
vma              1494 fs/userfaultfd.c 		vma = vma->vm_next;
vma              1495 fs/userfaultfd.c 	} while (vma && vma->vm_start < end);
vma              1518 fs/userfaultfd.c 	struct vm_area_struct *vma, *prev, *cur;
vma              1545 fs/userfaultfd.c 	vma = find_vma_prev(mm, start, &prev);
vma              1546 fs/userfaultfd.c 	if (!vma)
vma              1551 fs/userfaultfd.c 	if (vma->vm_start >= end)
vma              1558 fs/userfaultfd.c 	if (is_vm_hugetlb_page(vma)) {
vma              1559 fs/userfaultfd.c 		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
vma              1570 fs/userfaultfd.c 	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
vma              1590 fs/userfaultfd.c 	if (vma->vm_start < start)
vma              1591 fs/userfaultfd.c 		prev = vma;
vma              1597 fs/userfaultfd.c 		BUG_ON(!vma_can_userfault(vma));
vma              1603 fs/userfaultfd.c 		if (!vma->vm_userfaultfd_ctx.ctx)
vma              1606 fs/userfaultfd.c 		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
vma              1608 fs/userfaultfd.c 		if (vma->vm_start > start)
vma              1609 fs/userfaultfd.c 			start = vma->vm_start;
vma              1610 fs/userfaultfd.c 		vma_end = min(end, vma->vm_end);
vma              1612 fs/userfaultfd.c 		if (userfaultfd_missing(vma)) {
vma              1622 fs/userfaultfd.c 			wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
vma              1625 fs/userfaultfd.c 		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
vma              1627 fs/userfaultfd.c 				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
vma              1628 fs/userfaultfd.c 				 vma_policy(vma),
vma              1631 fs/userfaultfd.c 			vma = prev;
vma              1634 fs/userfaultfd.c 		if (vma->vm_start < start) {
vma              1635 fs/userfaultfd.c 			ret = split_vma(mm, vma, start, 1);
vma              1639 fs/userfaultfd.c 		if (vma->vm_end > end) {
vma              1640 fs/userfaultfd.c 			ret = split_vma(mm, vma, end, 0);
vma              1650 fs/userfaultfd.c 		vma->vm_flags = new_flags;
vma              1651 fs/userfaultfd.c 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
vma              1654 fs/userfaultfd.c 		prev = vma;
vma              1655 fs/userfaultfd.c 		start = vma->vm_end;
vma              1656 fs/userfaultfd.c 		vma = vma->vm_next;
vma              1657 fs/userfaultfd.c 	} while (vma && vma->vm_start < end);
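
The userfaultfd register and unregister paths above share one shape: walk the VMAs covering [start, end), try vma_merge() with the new flag set, split_vma() at the range edges when a VMA straddles them, then commit the flags and context pointer. A sketch of the commit step only (the merge/split bookkeeping is elided); vm_userfaultfd_ctx exists only under CONFIG_USERFAULTFD.

#include <linux/mm.h>
#include <linux/userfaultfd_k.h>

static void uffd_commit_vma(struct vm_area_struct *vma,
			    unsigned long new_flags,
			    struct userfaultfd_ctx *ctx)
{
	/* runs after vma_merge()/split_vma() have trimmed vma to the range */
	vma->vm_flags = new_flags;		/* VM_UFFD_MISSING and/or VM_UFFD_WP set */
	vma->vm_userfaultfd_ctx.ctx = ctx;	/* faults now route to this context */
}
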
vma              1144 fs/xfs/xfs_file.c 	struct inode		*inode = file_inode(vmf->vma->vm_file);
vma              1152 fs/xfs/xfs_file.c 		file_update_time(vmf->vma->vm_file);
vma              1181 fs/xfs/xfs_file.c 			IS_DAX(file_inode(vmf->vma->vm_file)) &&
vma              1190 fs/xfs/xfs_file.c 	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
vma              1229 fs/xfs/xfs_file.c 	struct vm_area_struct *vma)
vma              1238 fs/xfs/xfs_file.c 	if (!daxdev_mapping_supported(vma, dax_dev))
vma              1242 fs/xfs/xfs_file.c 	vma->vm_ops = &xfs_file_vm_ops;
vma              1244 fs/xfs/xfs_file.c 		vma->vm_flags |= VM_HUGEPAGE;
vma                33 include/asm-generic/cacheflush.h static inline void flush_cache_range(struct vm_area_struct *vma,
vma                41 include/asm-generic/cacheflush.h static inline void flush_cache_page(struct vm_area_struct *vma,
vma                73 include/asm-generic/cacheflush.h static inline void flush_icache_page(struct vm_area_struct *vma,
vma                80 include/asm-generic/cacheflush.h static inline void flush_icache_user_range(struct vm_area_struct *vma,
vma               100 include/asm-generic/cacheflush.h #define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
vma               103 include/asm-generic/cacheflush.h 		flush_icache_user_range(vma, page, vaddr, len); \
vma               108 include/asm-generic/cacheflush.h #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
vma                69 include/asm-generic/hugetlb.h static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
vma                72 include/asm-generic/hugetlb.h 	ptep_clear_flush(vma, addr, ptep);
vma               114 include/asm-generic/hugetlb.h static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
vma               118 include/asm-generic/hugetlb.h 	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
vma                26 include/asm-generic/mm_hooks.h 				     struct vm_area_struct *vma)
vma                30 include/asm-generic/mm_hooks.h static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
vma                30 include/asm-generic/pgtable.h extern int ptep_set_access_flags(struct vm_area_struct *vma,
vma                37 include/asm-generic/pgtable.h extern int pmdp_set_access_flags(struct vm_area_struct *vma,
vma                40 include/asm-generic/pgtable.h extern int pudp_set_access_flags(struct vm_area_struct *vma,
vma                44 include/asm-generic/pgtable.h static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
vma                51 include/asm-generic/pgtable.h static inline int pudp_set_access_flags(struct vm_area_struct *vma,
vma                62 include/asm-generic/pgtable.h static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
vma                71 include/asm-generic/pgtable.h 		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
vma                78 include/asm-generic/pgtable.h static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
vma                87 include/asm-generic/pgtable.h 		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
vma                91 include/asm-generic/pgtable.h static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
vma               102 include/asm-generic/pgtable.h int ptep_clear_flush_young(struct vm_area_struct *vma,
vma               108 include/asm-generic/pgtable.h extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
vma               115 include/asm-generic/pgtable.h static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
vma               206 include/asm-generic/pgtable.h extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
vma               212 include/asm-generic/pgtable.h extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
vma               215 include/asm-generic/pgtable.h extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
vma               289 include/asm-generic/pgtable.h extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
vma               292 include/asm-generic/pgtable.h static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
vma               318 include/asm-generic/pgtable.h static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
vma               322 include/asm-generic/pgtable.h 	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
vma               328 include/asm-generic/pgtable.h extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
vma               451 include/asm-generic/pgtable.h 				     struct vm_area_struct *vma,
vma               469 include/asm-generic/pgtable.h 				  struct vm_area_struct *vma,
vma               490 include/asm-generic/pgtable.h #define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
vma               609 include/asm-generic/pgtable.h static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
vma               618 include/asm-generic/pgtable.h 	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
vma               621 include/asm-generic/pgtable.h static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
vma               629 include/asm-generic/pgtable.h 	set_pte_at(vma->vm_mm, addr, ptep, pte);
vma               647 include/asm-generic/pgtable.h static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
vma               651 include/asm-generic/pgtable.h 	return __ptep_modify_prot_start(vma, addr, ptep);
vma               658 include/asm-generic/pgtable.h static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
vma               662 include/asm-generic/pgtable.h 	__ptep_modify_prot_commit(vma, addr, ptep, pte);
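The modify_prot start/commit pair above brackets a protection change so that hardware updates to the dirty/accessed bits are not lost while the PTE is rewritten. A minimal calling sketch, modelled on the mm/mprotect.c pattern and assuming the five-argument ptep_modify_prot_commit() of this era (the commit also receives the old PTE value; locking and the surrounding walk are elided):

        pte_t oldpte, ptent;

        oldpte = ptep_modify_prot_start(vma, addr, ptep);   /* clears the PTE */
        ptent = pte_modify(oldpte, newprot);                /* new permissions */
        ptep_modify_prot_commit(vma, addr, ptep, oldpte, ptent);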
vma               805 include/asm-generic/pgtable.h static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
vma               816 include/asm-generic/pgtable.h static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
vma               825 include/asm-generic/pgtable.h static inline int track_pfn_copy(struct vm_area_struct *vma)
vma               835 include/asm-generic/pgtable.h static inline void untrack_pfn(struct vm_area_struct *vma,
vma               843 include/asm-generic/pgtable.h static inline void untrack_pfn_moved(struct vm_area_struct *vma)
vma               847 include/asm-generic/pgtable.h extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
vma               850 include/asm-generic/pgtable.h extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
vma               852 include/asm-generic/pgtable.h extern int track_pfn_copy(struct vm_area_struct *vma);
vma               853 include/asm-generic/pgtable.h extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
vma               855 include/asm-generic/pgtable.h extern void untrack_pfn_moved(struct vm_area_struct *vma);
vma              1112 include/asm-generic/pgtable.h #define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
vma              1113 include/asm-generic/pgtable.h #define flush_pud_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
vma              1115 include/asm-generic/pgtable.h #define flush_pmd_tlb_range(vma, addr, end)	BUILD_BUG()
vma              1116 include/asm-generic/pgtable.h #define flush_pud_tlb_range(vma, addr, end)	BUILD_BUG()
vma               343 include/asm-generic/tlb.h tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
vma               346 include/asm-generic/tlb.h static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
vma               366 include/asm-generic/tlb.h 		struct vm_area_struct vma = {
vma               372 include/asm-generic/tlb.h 		flush_tlb_range(&vma, tlb->start, tlb->end);
vma               377 include/asm-generic/tlb.h tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
vma               390 include/asm-generic/tlb.h 	tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB);
vma               391 include/asm-generic/tlb.h 	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
vma               397 include/asm-generic/tlb.h tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
vma               472 include/asm-generic/tlb.h static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
vma               477 include/asm-generic/tlb.h 	tlb_update_vma_flags(tlb, vma);
vma               478 include/asm-generic/tlb.h 	flush_cache_range(vma, vma->vm_start, vma->vm_end);
vma               483 include/asm-generic/tlb.h static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
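tlb_start_vma()/tlb_end_vma() above bracket the per-VMA portion of an unmap walk. A simplified sketch of the surrounding mmu_gather sequence, assuming the four-argument tlb_gather_mmu()/three-argument tlb_finish_mmu() of this kernel generation (the page-table walk itself is elided):

        struct mmu_gather tlb;

        tlb_gather_mmu(&tlb, vma->vm_mm, start, end);
        tlb_start_vma(&tlb, vma);       /* flush_cache_range() + record VMA flags */
        /* ... walk page tables, feeding pages via tlb_remove_page() ... */
        tlb_end_vma(&tlb, vma);         /* flush the TLB for this VMA's range */
        tlb_finish_mmu(&tlb, start, end);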
vma               654 include/drm/drm_drv.h 				struct vm_area_struct *vma);
vma               334 include/drm/drm_gem.h void drm_gem_vm_open(struct vm_area_struct *vma);
vma               335 include/drm/drm_gem.h void drm_gem_vm_close(struct vm_area_struct *vma);
vma               337 include/drm/drm_gem.h 		     struct vm_area_struct *vma);
vma               338 include/drm/drm_gem.h int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
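drm_gem_mmap() above is designed to be plugged straight into a driver's file_operations: it resolves the GEM object from the fake offset in vma->vm_pgoff and finishes the mapping. A hedged, abridged sketch of the usual wiring (my_driver_fops is a hypothetical name; the other hooks are the stock DRM file helpers):

        static const struct file_operations my_driver_fops = {
                .owner          = THIS_MODULE,
                .open           = drm_open,
                .release        = drm_release,
                .unlocked_ioctl = drm_ioctl,
                .mmap           = drm_gem_mmap, /* declared above */
        };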
vma                80 include/drm/drm_gem_cma_helper.h int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
vma               105 include/drm/drm_gem_cma_helper.h 			   struct vm_area_struct *vma);
vma               159 include/drm/drm_gem_shmem_helper.h int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma);
vma               166 include/drm/drm_legacy.h int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);
vma                90 include/drm/drm_prime.h int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
vma                91 include/drm/drm_prime.h int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
vma                68 include/drm/drm_vram_mm_helper.h int drm_vram_mm_mmap(struct file *filp, struct vm_area_struct *vma,
vma                85 include/drm/drm_vram_mm_helper.h 	struct file *filp, struct vm_area_struct *vma);
vma               722 include/drm/ttm/ttm_bo_api.h int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
vma               734 include/drm/ttm/ttm_bo_api.h int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
vma                19 include/linux/binfmts.h 	struct vm_area_struct *vma;
vma               244 include/linux/buffer_head.h int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
vma                23 include/linux/crash_dump.h extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
vma                62 include/linux/dax.h static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
vma                65 include/linux/dax.h 	if (!(vma->vm_flags & VM_SYNC))
vma                67 include/linux/dax.h 	if (!IS_DAX(file_inode(vma->vm_file)))
vma               105 include/linux/dax.h static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
vma               108 include/linux/dax.h 	return !(vma->vm_flags & VM_SYNC);
vma                45 include/linux/dm-io.h 		void *vma;
vma               235 include/linux/dma-buf.h 	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
vma               162 include/linux/dma-mapping.h int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
vma               167 include/linux/dma-mapping.h int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
vma               173 include/linux/dma-mapping.h #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
vma               186 include/linux/dma-mapping.h static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
vma               458 include/linux/dma-mapping.h int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
vma               547 include/linux/dma-mapping.h static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
vma               624 include/linux/dma-mapping.h extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
vma               800 include/linux/dma-mapping.h 			      struct vm_area_struct *vma,
vma               804 include/linux/dma-mapping.h 	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
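The inline above (dma_mmap_coherent() forwarding to dma_mmap_attrs() with no attrs) is what drivers typically call from their own mmap handler to expose a coherent buffer to user space. A minimal sketch, assuming a hypothetical driver structure holding cpu_addr/dma_handle/size from an earlier dma_alloc_coherent():

        static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
        {
                struct mydrv *drv = file->private_data;     /* hypothetical */

                return dma_mmap_coherent(drv->dev, vma, drv->cpu_addr,
                                         drv->dma_handle, drv->size);
        }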
vma               284 include/linux/fb.h 	int (*fb_mmap)(struct fb_info *info, struct vm_area_struct *vma);
vma               661 include/linux/fb.h int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
vma              1906 include/linux/fs.h static inline int call_mmap(struct file *file, struct vm_area_struct *vma)
vma              1908 include/linux/fs.h 	return file->f_op->mmap(file, vma);
vma              3371 include/linux/fs.h static inline bool vma_is_dax(struct vm_area_struct *vma)
vma              3373 include/linux/fs.h 	return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
vma              3376 include/linux/fs.h static inline bool vma_is_fsdax(struct vm_area_struct *vma)
vma              3380 include/linux/fs.h 	if (!vma->vm_file)
vma              3382 include/linux/fs.h 	if (!vma_is_dax(vma))
vma              3384 include/linux/fs.h 	inode = file_inode(vma->vm_file);
vma               535 include/linux/gfp.h 			struct vm_area_struct *vma, unsigned long addr,
vma               537 include/linux/gfp.h #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
vma               538 include/linux/gfp.h 	alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
vma               542 include/linux/gfp.h #define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
vma               544 include/linux/gfp.h #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
vma               548 include/linux/gfp.h #define alloc_page_vma(gfp_mask, vma, addr)			\
vma               549 include/linux/gfp.h 	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
vma               550 include/linux/gfp.h #define alloc_page_vma_node(gfp_mask, vma, addr, node)		\
vma               551 include/linux/gfp.h 	alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
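alloc_page_vma() above applies the VMA's NUMA policy to a single-page allocation at a given user address. A typical anonymous-fault call, sketched (error handling beyond the NULL check elided):

        struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);

        if (!page)
                return VM_FAULT_OOM;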
vma                15 include/linux/highmem.h static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
vma               183 include/linux/highmem.h 			struct vm_area_struct *vma,
vma               187 include/linux/highmem.h 			vma, vaddr);
vma               205 include/linux/highmem.h alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
vma               208 include/linux/highmem.h 	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
vma               251 include/linux/highmem.h 	unsigned long vaddr, struct vm_area_struct *vma)
vma                13 include/linux/huge_mm.h 			 struct vm_area_struct *vma);
vma                17 include/linux/huge_mm.h 			 struct vm_area_struct *vma);
vma                28 include/linux/huge_mm.h extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
vma                33 include/linux/huge_mm.h 			struct vm_area_struct *vma,
vma                36 include/linux/huge_mm.h 			struct vm_area_struct *vma,
vma                39 include/linux/huge_mm.h 			struct vm_area_struct *vma,
vma                41 include/linux/huge_mm.h extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
vma                44 include/linux/huge_mm.h extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
vma                47 include/linux/huge_mm.h extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
vma                90 include/linux/huge_mm.h extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
vma                98 include/linux/huge_mm.h static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
vma               100 include/linux/huge_mm.h 	if (vma->vm_flags & VM_NOHUGEPAGE)
vma               103 include/linux/huge_mm.h 	if (is_vma_temporary_stack(vma))
vma               106 include/linux/huge_mm.h 	if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
vma               117 include/linux/huge_mm.h 	if (vma_is_dax(vma))
vma               122 include/linux/huge_mm.h 		return !!(vma->vm_flags & VM_HUGEPAGE);
vma               127 include/linux/huge_mm.h bool transparent_hugepage_enabled(struct vm_area_struct *vma);
vma               131 include/linux/huge_mm.h static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
vma               135 include/linux/huge_mm.h 	if (!vma_is_anonymous(vma)) {
vma               136 include/linux/huge_mm.h 		if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
vma               137 include/linux/huge_mm.h 			(vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
vma               141 include/linux/huge_mm.h 	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
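A usage sketch for the suitability check above: callers round the faulting address down to a PMD boundary first, then ask whether a huge mapping can legally be placed there:

        unsigned long haddr = address & HPAGE_PMD_MASK;

        if (transhuge_vma_suitable(vma, haddr)) {
                /* a PMD-sized page may be mapped at haddr */
        }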
vma               172 include/linux/huge_mm.h void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
vma               185 include/linux/huge_mm.h void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
vma               188 include/linux/huge_mm.h void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
vma               199 include/linux/huge_mm.h extern int hugepage_madvise(struct vm_area_struct *vma,
vma               201 include/linux/huge_mm.h extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
vma               206 include/linux/huge_mm.h 		struct vm_area_struct *vma);
vma               208 include/linux/huge_mm.h 		struct vm_area_struct *vma);
vma               217 include/linux/huge_mm.h 		struct vm_area_struct *vma)
vma               219 include/linux/huge_mm.h 	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
vma               221 include/linux/huge_mm.h 		return __pmd_trans_huge_lock(pmd, vma);
vma               226 include/linux/huge_mm.h 		struct vm_area_struct *vma)
vma               228 include/linux/huge_mm.h 	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
vma               230 include/linux/huge_mm.h 		return __pud_trans_huge_lock(pud, vma);
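The canonical pattern for the trans-huge lock helpers above (compare callers such as mm/madvise.c): take the PMD lock only if the entry really is huge, otherwise fall back to the PTE level:

        spinlock_t *ptl = pmd_trans_huge_lock(pmd, vma);

        if (ptl) {
                /* *pmd is a stable huge (or devmap) entry here */
                spin_unlock(ptl);
        } else {
                /* not huge, or a split raced with us: use the pte path */
        }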
vma               241 include/linux/huge_mm.h struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
vma               243 include/linux/huge_mm.h struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
vma               295 include/linux/huge_mm.h static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
vma               300 include/linux/huge_mm.h static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
vma               305 include/linux/huge_mm.h static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
vma               336 include/linux/huge_mm.h static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
vma               338 include/linux/huge_mm.h static inline void split_huge_pmd_address(struct vm_area_struct *vma,
vma               344 include/linux/huge_mm.h static inline int hugepage_madvise(struct vm_area_struct *vma,
vma               350 include/linux/huge_mm.h static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
vma               361 include/linux/huge_mm.h 		struct vm_area_struct *vma)
vma               366 include/linux/huge_mm.h 		struct vm_area_struct *vma)
vma               392 include/linux/huge_mm.h static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
vma               398 include/linux/huge_mm.h static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
vma                62 include/linux/hugetlb.h void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
vma                80 include/linux/hugetlb.h 			  struct vm_area_struct *vma,
vma                83 include/linux/hugetlb.h void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
vma                90 include/linux/hugetlb.h vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
vma                98 include/linux/hugetlb.h 						struct vm_area_struct *vma,
vma               123 include/linux/hugetlb.h void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
vma               127 include/linux/hugetlb.h struct page *follow_huge_pd(struct vm_area_struct *vma,
vma               139 include/linux/hugetlb.h unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
vma               146 include/linux/hugetlb.h static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
vma               162 include/linux/hugetlb.h 				struct vm_area_struct *vma,
vma               169 include/linux/hugetlb.h #define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
vma               177 include/linux/hugetlb.h #define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL
vma               197 include/linux/hugetlb.h static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
vma               204 include/linux/hugetlb.h 			struct vm_area_struct *vma, unsigned long start,
vma               211 include/linux/hugetlb.h 			struct vm_area_struct *vma, unsigned long start,
vma               217 include/linux/hugetlb.h 				struct vm_area_struct *vma, unsigned long address,
vma               360 include/linux/hugetlb.h struct page *alloc_huge_page(struct vm_area_struct *vma,
vma               365 include/linux/hugetlb.h struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
vma               402 include/linux/hugetlb.h static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
vma               404 include/linux/hugetlb.h 	return hstate_file(vma->vm_file);
vma               412 include/linux/hugetlb.h extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);
vma               414 include/linux/hugetlb.h extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
vma               449 include/linux/hugetlb.h static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
vma               576 include/linux/hugetlb.h static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
vma               579 include/linux/hugetlb.h 	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
vma               585 include/linux/hugetlb.h static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
vma               589 include/linux/hugetlb.h 	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
vma               596 include/linux/hugetlb.h static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
vma               615 include/linux/hugetlb.h 					       struct vm_area_struct *vma,
vma               636 include/linux/hugetlb.h static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
vma               656 include/linux/hugetlb.h static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
vma               661 include/linux/hugetlb.h static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
vma                 9 include/linux/hugetlb_inline.h static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
vma                11 include/linux/hugetlb_inline.h 	return !!(vma->vm_flags & VM_HUGETLB);
vma                16 include/linux/hugetlb_inline.h static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
vma                30 include/linux/ioc3.h 	struct ioc3 __iomem *vma;	/* pointer to registers */
vma               269 include/linux/kernfs.h 	int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma);
vma                16 include/linux/khugepaged.h extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
vma                54 include/linux/khugepaged.h static inline int khugepaged_enter(struct vm_area_struct *vma,
vma                57 include/linux/khugepaged.h 	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
vma                61 include/linux/khugepaged.h 		    !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
vma                62 include/linux/khugepaged.h 			if (__khugepaged_enter(vma->vm_mm))
vma                74 include/linux/khugepaged.h static inline int khugepaged_enter(struct vm_area_struct *vma,
vma                79 include/linux/khugepaged.h static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
vma                22 include/linux/ksm.h int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
vma                52 include/linux/ksm.h 			struct vm_area_struct *vma, unsigned long address);
vma                57 include/linux/ksm.h 			struct vm_area_struct *vma, unsigned long address);
vma                71 include/linux/ksm.h static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
vma                78 include/linux/ksm.h 			struct vm_area_struct *vma, unsigned long address)
vma                92 include/linux/ksm.h 			struct vm_area_struct *vma, unsigned long address)
vma              1296 include/linux/kvm_host.h 	int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
vma              1608 include/linux/lsm_hooks.h 	int (*file_mprotect)(struct vm_area_struct *vma, unsigned long reqprot,
vma                94 include/linux/mdev.h 	int	(*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma);
vma                95 include/linux/mempolicy.h #define vma_policy(vma) ((vma)->vm_policy)
vma               133 include/linux/mempolicy.h 				struct vm_area_struct *vma,
vma               140 include/linux/mempolicy.h struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
vma               142 include/linux/mempolicy.h bool vma_policy_mof(struct vm_area_struct *vma);
vma               149 include/linux/mempolicy.h extern int huge_node(struct vm_area_struct *vma,
vma               176 include/linux/mempolicy.h static inline bool vma_migratable(struct vm_area_struct *vma)
vma               178 include/linux/mempolicy.h 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
vma               185 include/linux/mempolicy.h 	if (vma_is_dax(vma))
vma               189 include/linux/mempolicy.h 	if (vma->vm_flags & VM_HUGETLB)
vma               198 include/linux/mempolicy.h 	if (vma->vm_file &&
vma               199 include/linux/mempolicy.h 		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
vma               246 include/linux/mempolicy.h #define vma_policy(vma) NULL
vma               271 include/linux/mempolicy.h static inline int huge_node(struct vm_area_struct *vma,
vma               302 include/linux/mempolicy.h static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
vma               127 include/linux/migrate.h 				  struct vm_area_struct *vma, int node);
vma               134 include/linux/migrate.h 					 struct vm_area_struct *vma, int node)
vma               142 include/linux/migrate.h 			struct vm_area_struct *vma,
vma               148 include/linux/migrate.h 			struct vm_area_struct *vma,
vma               184 include/linux/migrate.h 	struct vm_area_struct	*vma;
vma               418 include/linux/mm.h 	struct vm_area_struct *vma;	/* Target VMA */
vma               489 include/linux/mm.h 	int (*access)(struct vm_area_struct *vma, unsigned long addr,
vma               495 include/linux/mm.h 	const char *(*name)(struct vm_area_struct *vma);
vma               505 include/linux/mm.h 	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
vma               517 include/linux/mm.h 	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
vma               525 include/linux/mm.h 	struct page *(*find_special_page)(struct vm_area_struct *vma,
vma               529 include/linux/mm.h static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
vma               533 include/linux/mm.h 	memset(vma, 0, sizeof(*vma));
vma               534 include/linux/mm.h 	vma->vm_mm = mm;
vma               535 include/linux/mm.h 	vma->vm_ops = &dummy_vm_ops;
vma               536 include/linux/mm.h 	INIT_LIST_HEAD(&vma->anon_vma_chain);
vma               539 include/linux/mm.h static inline void vma_set_anonymous(struct vm_area_struct *vma)
vma               541 include/linux/mm.h 	vma->vm_ops = NULL;
vma               544 include/linux/mm.h static inline bool vma_is_anonymous(struct vm_area_struct *vma)
vma               546 include/linux/mm.h 	return !vma->vm_ops;
vma               554 include/linux/mm.h bool vma_is_shmem(struct vm_area_struct *vma);
vma               556 include/linux/mm.h static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
vma               559 include/linux/mm.h int vma_is_stack_for_current(struct vm_area_struct *vma);
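Per the lines above, a freshly initialized VMA carries a dummy vm_ops, so it is not treated as anonymous until vma_set_anonymous() clears the pointer explicitly (vma_is_anonymous() simply tests !vma->vm_ops). A sketch of constructing a VMA by hand; most real code instead allocates with vm_area_alloc(), which performs the same initialization:

        struct vm_area_struct vma;

        vma_init(&vma, mm);             /* zeroes it, sets vm_mm + dummy vm_ops */
        vma.vm_flags = VM_READ | VM_WRITE;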
vma               842 include/linux/mm.h static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
vma               844 include/linux/mm.h 	if (likely(vma->vm_flags & VM_WRITE))
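maybe_mkwrite() above sets the hardware write bit only when the VMA permits writing. The standard fault-path idiom pairs it with mk_pte()/pte_mkdirty(), sketched here from the familiar mm/memory.c pattern:

        pte_t entry = mk_pte(page, vma->vm_page_prot);

        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
        set_pte_at(vma->vm_mm, address, ptep, entry);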
vma              1451 include/linux/mm.h struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
vma              1453 include/linux/mm.h struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
vma              1456 include/linux/mm.h void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
vma              1458 include/linux/mm.h void zap_page_range(struct vm_area_struct *vma, unsigned long address,
vma              1468 include/linux/mm.h 			struct vm_area_struct *vma);
vma              1472 include/linux/mm.h int follow_pfn(struct vm_area_struct *vma, unsigned long address,
vma              1474 include/linux/mm.h int follow_phys(struct vm_area_struct *vma, unsigned long address,
vma              1476 include/linux/mm.h int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
vma              1488 include/linux/mm.h extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
vma              1498 include/linux/mm.h static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
vma              1624 include/linux/mm.h extern unsigned long move_page_tables(struct vm_area_struct *vma,
vma              1628 include/linux/mm.h extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
vma              1631 include/linux/mm.h extern int mprotect_fixup(struct vm_area_struct *vma,
vma              1748 include/linux/mm.h int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
vma              2254 include/linux/mm.h #define vma_interval_tree_foreach(vma, root, start, last)		\
vma              2255 include/linux/mm.h 	for (vma = vma_interval_tree_iter_first(root, start, last);	\
vma              2256 include/linux/mm.h 	     vma; vma = vma_interval_tree_iter_next(vma, start, last))
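The iterator macro above drives file-backed reverse mapping: given a page-offset range, it visits every VMA of the file that maps it. A condensed sketch of a walk over an address_space (compare rmap_walk_file() in mm/rmap.c; translating vma + pgoff back to a user address is elided):

        struct vm_area_struct *vma;

        i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
                /* page is mapped somewhere in [vma->vm_start, vma->vm_end) */
        }
        i_mmap_unlock_read(mapping);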
vma              2277 include/linux/mm.h extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
vma              2280 include/linux/mm.h static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
vma              2283 include/linux/mm.h 	return __vma_adjust(vma, start, end, pgoff, insert, NULL);
vma              2327 include/linux/mm.h extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
vma              2451 include/linux/mm.h extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
vma              2454 include/linux/mm.h extern int expand_downwards(struct vm_area_struct *vma,
vma              2457 include/linux/mm.h extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
vma              2459 include/linux/mm.h   #define expand_upwards(vma, address) (0)
vma              2471 include/linux/mm.h 	struct vm_area_struct * vma = find_vma(mm,start_addr);
vma              2473 include/linux/mm.h 	if (vma && end_addr <= vma->vm_start)
vma              2474 include/linux/mm.h 		vma = NULL;
vma              2475 include/linux/mm.h 	return vma;
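find_vma_intersection() above returns the first VMA overlapping [start_addr, end_addr), or NULL. A common use, sketched (the errno choice varies by caller), is validating that a fixed-address mapping request does not collide with anything already mapped:

        if (find_vma_intersection(mm, addr, addr + len))
                return -EEXIST; /* range already (partially) mapped */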
vma              2478 include/linux/mm.h static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
vma              2480 include/linux/mm.h 	unsigned long vm_start = vma->vm_start;
vma              2482 include/linux/mm.h 	if (vma->vm_flags & VM_GROWSDOWN) {
vma              2484 include/linux/mm.h 		if (vm_start > vma->vm_start)
vma              2490 include/linux/mm.h static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
vma              2492 include/linux/mm.h 	unsigned long vm_end = vma->vm_end;
vma              2494 include/linux/mm.h 	if (vma->vm_flags & VM_GROWSUP) {
vma              2496 include/linux/mm.h 		if (vm_end < vma->vm_end)
vma              2502 include/linux/mm.h static inline unsigned long vma_pages(struct vm_area_struct *vma)
vma              2504 include/linux/mm.h 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
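vma_pages() above is the usual size check in driver mmap handlers: compare what user space asked to map against the backing buffer. Sketch, with a hypothetical buffer object:

        if (vma_pages(vma) > buf->nr_pages)    /* buf is hypothetical */
                return -EINVAL;                /* request exceeds the buffer */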
vma              2511 include/linux/mm.h 	struct vm_area_struct *vma = find_vma(mm, vm_start);
vma              2513 include/linux/mm.h 	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
vma              2514 include/linux/mm.h 		vma = NULL;
vma              2516 include/linux/mm.h 	return vma;
vma              2519 include/linux/mm.h static inline bool range_in_vma(struct vm_area_struct *vma,
vma              2522 include/linux/mm.h 	return (vma && vma->vm_start <= start && end <= vma->vm_end);
vma              2527 include/linux/mm.h void vma_set_page_prot(struct vm_area_struct *vma);
vma              2533 include/linux/mm.h static inline void vma_set_page_prot(struct vm_area_struct *vma)
vma              2535 include/linux/mm.h 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma              2540 include/linux/mm.h unsigned long change_prot_numa(struct vm_area_struct *vma,
vma              2548 include/linux/mm.h int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
vma              2550 include/linux/mm.h int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
vma              2552 include/linux/mm.h vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
vma              2554 include/linux/mm.h vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
vma              2556 include/linux/mm.h vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
vma              2558 include/linux/mm.h vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
vma              2560 include/linux/mm.h int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
vma              2562 include/linux/mm.h static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
vma              2565 include/linux/mm.h 	int err = vm_insert_page(vma, addr, page);
vma              2582 include/linux/mm.h struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
vma              2763 include/linux/mm.h const char * arch_vma_name(struct vm_area_struct *vma);
vma              2856 include/linux/mm.h 				struct vm_area_struct *vma,
vma               733 include/linux/mm_types.h 				struct vm_area_struct *vma,
vma                14 include/linux/mmdebug.h void dump_vma(const struct vm_area_struct *vma);
vma                26 include/linux/mmdebug.h #define VM_BUG_ON_VMA(cond, vma)					\
vma                29 include/linux/mmdebug.h 			dump_vma(vma);					\
vma                47 include/linux/mmdebug.h #define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond)
vma                65 include/linux/mmu_notifier.h 	struct vm_area_struct *vma;
vma               409 include/linux/mmu_notifier.h 					   struct vm_area_struct *vma,
vma               414 include/linux/mmu_notifier.h 	range->vma = vma;
vma               544 include/linux/mmu_notifier.h #define mmu_notifier_range_init(range,event,flags,vma,mm,start,end)  \
vma               187 include/linux/net.h 				      struct vm_area_struct * vma);
vma               444 include/linux/pagemap.h extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
vma               447 include/linux/pagemap.h static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
vma               451 include/linux/pagemap.h 	if (unlikely(is_vm_hugetlb_page(vma)))
vma               452 include/linux/pagemap.h 		return linear_hugepage_index(vma, address);
vma               453 include/linux/pagemap.h 	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
vma               454 include/linux/pagemap.h 	pgoff += vma->vm_pgoff;
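linear_page_index() above converts a user address in a file mapping into the page-cache index that backs it (with the hugetlb special case shown). A short sketch of the typical lookup built on it (reference handling elided):

        pgoff_t pgoff = linear_page_index(vma, address);
        struct page *page = find_get_page(vma->vm_file->f_mapping, pgoff);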
vma                56 include/linux/pagewalk.h 	struct vm_area_struct *vma;
vma                63 include/linux/pagewalk.h int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
vma              1809 include/linux/pci.h 			    struct vm_area_struct *vma,
vma              1812 include/linux/pci.h 			struct vm_area_struct *vma,
vma              1821 include/linux/pci.h #define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
vma              1823 include/linux/pci.h int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
vma              1170 include/linux/perf_event.h extern void perf_event_mmap(struct vm_area_struct *vma);
vma              1397 include/linux/perf_event.h static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
vma                12 include/linux/pkeys.h #define arch_override_mprotect_pkey(vma, prot, pkey) (0)
vma                16 include/linux/pkeys.h static inline int vma_pkey(struct vm_area_struct *vma)
vma                78 include/linux/rmap.h 	struct vm_area_struct *vma;
vma               148 include/linux/rmap.h static inline int anon_vma_prepare(struct vm_area_struct *vma)
vma               150 include/linux/rmap.h 	if (likely(vma->anon_vma))
vma               153 include/linux/rmap.h 	return __anon_vma_prepare(vma);
vma               156 include/linux/rmap.h static inline void anon_vma_merge(struct vm_area_struct *vma,
vma               159 include/linux/rmap.h 	VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
vma               207 include/linux/rmap.h 	struct vm_area_struct *vma;
vma               251 include/linux/rmap.h int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
vma               268 include/linux/rmap.h 	bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
vma               272 include/linux/rmap.h 	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
vma               281 include/linux/rmap.h #define anon_vma_prepare(vma)	(0)
vma               282 include/linux/rmap.h #define anon_vma_link(vma)	do {} while (0)
vma               366 include/linux/security.h int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
vma               919 include/linux/security.h static inline int security_file_mprotect(struct vm_area_struct *vma,
vma                80 include/linux/shmem_fs.h extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
vma               117 include/linux/shmem_fs.h extern bool shmem_huge_enabled(struct vm_area_struct *vma);
vma               119 include/linux/shmem_fs.h static inline bool shmem_huge_enabled(struct vm_area_struct *vma)
vma               348 include/linux/swap.h 						struct vm_area_struct *vma);
vma               418 include/linux/swap.h 				      struct vm_area_struct *vma,
vma               421 include/linux/swap.h 			struct vm_area_struct *vma, unsigned long addr,
vma               424 include/linux/swap.h 			struct vm_area_struct *vma, unsigned long addr,
vma               556 include/linux/swap.h 					     struct vm_area_struct *vma,
vma               208 include/linux/swapops.h extern void migration_entry_wait_huge(struct vm_area_struct *vma,
vma               233 include/linux/swapops.h static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
vma               170 include/linux/sysfs.h 		    struct vm_area_struct *vma);
vma               108 include/linux/uio_driver.h 	int (*mmap)(struct uio_info *info, struct vm_area_struct *vma);
vma               117 include/linux/uprobes.h extern int uprobe_mmap(struct vm_area_struct *vma);
vma               118 include/linux/uprobes.h extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end);
vma               169 include/linux/uprobes.h static inline int uprobe_mmap(struct vm_area_struct *vma)
vma               174 include/linux/uprobes.h uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
vma                44 include/linux/userfaultfd_k.h static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
vma                47 include/linux/userfaultfd_k.h 	return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
vma                50 include/linux/userfaultfd_k.h static inline bool userfaultfd_missing(struct vm_area_struct *vma)
vma                52 include/linux/userfaultfd_k.h 	return vma->vm_flags & VM_UFFD_MISSING;
vma                55 include/linux/userfaultfd_k.h static inline bool userfaultfd_armed(struct vm_area_struct *vma)
vma                57 include/linux/userfaultfd_k.h 	return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP);
vma                69 include/linux/userfaultfd_k.h extern bool userfaultfd_remove(struct vm_area_struct *vma,
vma                73 include/linux/userfaultfd_k.h extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
vma                88 include/linux/userfaultfd_k.h static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
vma                94 include/linux/userfaultfd_k.h static inline bool userfaultfd_missing(struct vm_area_struct *vma)
vma                99 include/linux/userfaultfd_k.h static inline bool userfaultfd_armed(struct vm_area_struct *vma)
vma               104 include/linux/userfaultfd_k.h static inline int dup_userfaultfd(struct vm_area_struct *vma,
vma               114 include/linux/userfaultfd_k.h static inline void mremap_userfaultfd_prep(struct vm_area_struct *vma,
vma               126 include/linux/userfaultfd_k.h static inline bool userfaultfd_remove(struct vm_area_struct *vma,
vma               133 include/linux/userfaultfd_k.h static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
vma                40 include/linux/vfio.h 	int	(*mmap)(void *device_data, struct vm_area_struct *vma);
vma                70 include/linux/vfio.h 	int		(*mmap)(void *iommu_data, struct vm_area_struct *vma);
vma               123 include/linux/vmalloc.h extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
vma               127 include/linux/vmalloc.h extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
vma               154 include/linux/vme.h int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma);
vma               278 include/media/dvb_vb2.h int dvb_vb2_mmap(struct dvb_vb2_ctx *ctx, struct vm_area_struct *vma);
vma               309 include/media/v4l2-mem2mem.h 		  struct vm_area_struct *vma);
vma               675 include/media/v4l2-mem2mem.h int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
vma               129 include/media/videobuf-core.h 				 struct vm_area_struct *vma);
vma               231 include/media/videobuf-core.h 			 struct vm_area_struct *vma);
vma                26 include/media/videobuf-vmalloc.h 	struct vm_area_struct *vma;
vma               147 include/media/videobuf2-core.h 	int		(*mmap)(void *buf_priv, struct vm_area_struct *vma);
vma               957 include/media/videobuf2-core.h int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma);
vma               291 include/media/videobuf2-v4l2.h int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma);
vma              1676 include/net/sock.h 		 struct vm_area_struct *vma);
vma               410 include/net/tcp.h 	     struct vm_area_struct *vma);
vma              2364 include/rdma/ib_verbs.h 	int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
vma              2796 include/rdma/ib_verbs.h int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
vma              2800 include/rdma/ib_verbs.h 				    struct vm_area_struct *vma,
vma               116 include/sound/compress_driver.h 			struct vm_area_struct *vma);
vma                32 include/sound/hwdep.h 		    struct vm_area_struct *vma);
vma                57 include/sound/info.h 		    struct vm_area_struct *vma);
vma                76 include/sound/pcm.h 	int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
vma                23 include/sound/pxa2xx-lib.h 	struct vm_area_struct *vma);
vma               383 include/sound/soc-component.h 			       struct vm_area_struct *vma);
vma                29 include/trace/events/fs_dax.h 		__entry->vm_start = vmf->vma->vm_start;
vma                30 include/trace/events/fs_dax.h 		__entry->vm_end = vmf->vma->vm_end;
vma                31 include/trace/events/fs_dax.h 		__entry->vm_flags = vmf->vma->vm_flags;
vma                79 include/trace/events/fs_dax.h 		__entry->vm_flags = vmf->vma->vm_flags;
vma               122 include/trace/events/fs_dax.h 		__entry->vm_flags = vmf->vma->vm_flags;
vma               168 include/trace/events/fs_dax.h 		__entry->vm_flags = vmf->vma->vm_flags;
vma               211 include/trace/events/fs_dax.h 		__entry->vm_flags = vmf->vma->vm_flags;
vma                65 include/xen/xen-ops.h int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
vma                69 include/xen/xen-ops.h static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
vma                82 include/xen/xen-ops.h int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
vma                88 include/xen/xen-ops.h int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
vma                95 include/xen/xen-ops.h static inline int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
vma               105 include/xen/xen-ops.h static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
vma               112 include/xen/xen-ops.h int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr,
vma               132 include/xen/xen-ops.h static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
vma               140 include/xen/xen-ops.h 		return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
vma               148 include/xen/xen-ops.h 	return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
vma               169 include/xen/xen-ops.h static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
vma               178 include/xen/xen-ops.h 	return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
vma               194 include/xen/xen-ops.h static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
vma               203 include/xen/xen-ops.h 	return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
vma               207 include/xen/xen-ops.h int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
vma                92 ipc/shm.c      static void shm_open(struct vm_area_struct *vma);
vma                93 ipc/shm.c      static void shm_close(struct vm_area_struct *vma);
vma               235 ipc/shm.c      static int __shm_open(struct vm_area_struct *vma)
vma               237 ipc/shm.c      	struct file *file = vma->vm_file;
vma               260 ipc/shm.c      static void shm_open(struct vm_area_struct *vma)
vma               262 ipc/shm.c      	int err = __shm_open(vma);
vma               322 ipc/shm.c      static void shm_close(struct vm_area_struct *vma)
vma               324 ipc/shm.c      	struct file *file = vma->vm_file;
vma               431 ipc/shm.c      	struct file *file = vmf->vma->vm_file;
vma               437 ipc/shm.c      static int shm_split(struct vm_area_struct *vma, unsigned long addr)
vma               439 ipc/shm.c      	struct file *file = vma->vm_file;
vma               443 ipc/shm.c      		return sfd->vm_ops->split(vma, addr);
vma               448 ipc/shm.c      static unsigned long shm_pagesize(struct vm_area_struct *vma)
vma               450 ipc/shm.c      	struct file *file = vma->vm_file;
vma               454 ipc/shm.c      		return sfd->vm_ops->pagesize(vma);
vma               460 ipc/shm.c      static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
vma               462 ipc/shm.c      	struct file *file = vma->vm_file;
vma               467 ipc/shm.c      		err = sfd->vm_ops->set_policy(vma, new);
vma               471 ipc/shm.c      static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
vma               474 ipc/shm.c      	struct file *file = vma->vm_file;
vma               479 ipc/shm.c      		pol = sfd->vm_ops->get_policy(vma, addr);
vma               480 ipc/shm.c      	else if (vma->vm_policy)
vma               481 ipc/shm.c      		pol = vma->vm_policy;
vma               487 ipc/shm.c      static int shm_mmap(struct file *file, struct vm_area_struct *vma)
vma               497 ipc/shm.c      	ret = __shm_open(vma);
vma               501 ipc/shm.c      	ret = call_mmap(sfd->file, vma);
vma               503 ipc/shm.c      		shm_close(vma);
vma               506 ipc/shm.c      	sfd->vm_ops = vma->vm_ops;
vma               510 ipc/shm.c      	vma->vm_ops = &shm_vm_ops;
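The ipc/shm.c lines above show the standard vm_ops interposition pattern: call the backing file's mmap, remember the vm_ops it installed, then substitute a wrapper whose handlers delegate (as shm_split() and shm_pagesize() do earlier in the listing). A condensed sketch of the same pattern; backing_file, saved_vm_ops and wrapper_vm_ops are hypothetical names:

        static int wrapped_mmap(struct file *file, struct vm_area_struct *vma)
        {
                int ret = call_mmap(backing_file, vma);

                if (ret)
                        return ret;
                saved_vm_ops = vma->vm_ops;     /* the backing driver's ops */
                vma->vm_ops = &wrapper_vm_ops;  /* ours delegate to the saved ones */
                return 0;
        }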
vma              1629 ipc/shm.c      	struct vm_area_struct *vma;
vma              1665 ipc/shm.c      	vma = find_vma(mm, addr);
vma              1668 ipc/shm.c      	while (vma) {
vma              1669 ipc/shm.c      		next = vma->vm_next;
vma              1676 ipc/shm.c      		if ((vma->vm_ops == &shm_vm_ops) &&
vma              1677 ipc/shm.c      			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
vma              1685 ipc/shm.c      			file = vma->vm_file;
vma              1686 ipc/shm.c      			size = i_size_read(file_inode(vma->vm_file));
vma              1687 ipc/shm.c      			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
vma              1695 ipc/shm.c      			vma = next;
vma              1698 ipc/shm.c      		vma = next;
vma              1707 ipc/shm.c      	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
vma              1708 ipc/shm.c      		next = vma->vm_next;
vma              1711 ipc/shm.c      		if ((vma->vm_ops == &shm_vm_ops) &&
vma              1712 ipc/shm.c      		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
vma              1713 ipc/shm.c      		    (vma->vm_file == file))
vma              1714 ipc/shm.c      			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
vma              1715 ipc/shm.c      		vma = next;
vma              1722 ipc/shm.c      	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
vma              1723 ipc/shm.c      		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
vma               540 kernel/acct.c  		struct vm_area_struct *vma;
vma               543 kernel/acct.c  		vma = current->mm->mmap;
vma               544 kernel/acct.c  		while (vma) {
vma               545 kernel/acct.c  			vsize += vma->vm_end - vma->vm_start;
vma               546 kernel/acct.c  			vma = vma->vm_next;
vma               244 kernel/bpf/stackmap.c static int stack_map_get_build_id(struct vm_area_struct *vma,
vma               253 kernel/bpf/stackmap.c 	if (!vma->vm_file)
vma               256 kernel/bpf/stackmap.c 	page = find_get_page(vma->vm_file->f_mapping, 0);
vma               286 kernel/bpf/stackmap.c 	struct vm_area_struct *vma;
vma               319 kernel/bpf/stackmap.c 		vma = find_vma(current->mm, ips[i]);
vma               320 kernel/bpf/stackmap.c 		if (!vma || stack_map_get_build_id(vma, id_offs[i].build_id)) {
vma               327 kernel/bpf/stackmap.c 		id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
vma               328 kernel/bpf/stackmap.c 			- vma->vm_start;
vma               239 kernel/dma/coherent.c 		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
vma               243 kernel/dma/coherent.c 		unsigned long off = vma->vm_pgoff;
vma               245 kernel/dma/coherent.c 		unsigned long user_count = vma_pages(vma);
vma               251 kernel/dma/coherent.c 			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
vma               253 kernel/dma/coherent.c 					       vma->vm_page_prot);
vma               275 kernel/dma/coherent.c int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
vma               280 kernel/dma/coherent.c 	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
vma               283 kernel/dma/coherent.c int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
vma               289 kernel/dma/coherent.c 	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
vma                 7 kernel/dma/dummy.c static int dma_dummy_mmap(struct device *dev, struct vm_area_struct *vma,
vma               189 kernel/dma/mapping.c int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
vma               194 kernel/dma/mapping.c 	unsigned long user_count = vma_pages(vma);
vma               196 kernel/dma/mapping.c 	unsigned long off = vma->vm_pgoff;
vma               200 kernel/dma/mapping.c 	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
vma               202 kernel/dma/mapping.c 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
vma               220 kernel/dma/mapping.c 	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
vma               221 kernel/dma/mapping.c 			user_count << PAGE_SHIFT, vma->vm_page_prot);
vma               261 kernel/dma/mapping.c int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
vma               268 kernel/dma/mapping.c 		return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size,
vma               272 kernel/dma/mapping.c 	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
vma              5433 kernel/events/core.c 	struct perf_event *event = vmf->vma->vm_file->private_data;
vma              5456 kernel/events/core.c 	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
vma              5563 kernel/events/core.c static void perf_mmap_open(struct vm_area_struct *vma)
vma              5565 kernel/events/core.c 	struct perf_event *event = vma->vm_file->private_data;
vma              5570 kernel/events/core.c 	if (vma->vm_pgoff)
vma              5574 kernel/events/core.c 		event->pmu->event_mapped(event, vma->vm_mm);
vma              5587 kernel/events/core.c static void perf_mmap_close(struct vm_area_struct *vma)
vma              5589 kernel/events/core.c 	struct perf_event *event = vma->vm_file->private_data;
vma              5597 kernel/events/core.c 		event->pmu->event_unmapped(event, vma->vm_mm);
vma              5604 kernel/events/core.c 	if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
vma              5616 kernel/events/core.c 		atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
vma              5690 kernel/events/core.c 	atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm);
vma              5704 kernel/events/core.c static int perf_mmap(struct file *file, struct vm_area_struct *vma)
vma              5724 kernel/events/core.c 	if (!(vma->vm_flags & VM_SHARED))
vma              5727 kernel/events/core.c 	vma_size = vma->vm_end - vma->vm_start;
vma              5729 kernel/events/core.c 	if (vma->vm_pgoff == 0) {
vma              5757 kernel/events/core.c 		if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
vma              5761 kernel/events/core.c 		if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
vma              5858 kernel/events/core.c 	locked = atomic64_read(&vma->vm_mm->pinned_vm) + extra;
vma              5868 kernel/events/core.c 	if (vma->vm_flags & VM_WRITE)
vma              5890 kernel/events/core.c 		ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
vma              5899 kernel/events/core.c 		atomic64_add(extra, &vma->vm_mm->pinned_vm);
vma              5912 kernel/events/core.c 	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
vma              5913 kernel/events/core.c 	vma->vm_ops = &perf_mmap_vmops;
vma              5916 kernel/events/core.c 		event->pmu->event_mapped(event, vma->vm_mm);
vma              7356 kernel/events/core.c 	struct vm_area_struct	*vma;
vma              7380 kernel/events/core.c 	struct vm_area_struct *vma = mmap_event->vma;
vma              7381 kernel/events/core.c 	int executable = vma->vm_flags & VM_EXEC;
vma              7443 kernel/events/core.c 	struct vm_area_struct *vma = mmap_event->vma;
vma              7444 kernel/events/core.c 	struct file *file = vma->vm_file;
vma              7453 kernel/events/core.c 	if (vma->vm_flags & VM_READ)
vma              7455 kernel/events/core.c 	if (vma->vm_flags & VM_WRITE)
vma              7457 kernel/events/core.c 	if (vma->vm_flags & VM_EXEC)
vma              7460 kernel/events/core.c 	if (vma->vm_flags & VM_MAYSHARE)
vma              7465 kernel/events/core.c 	if (vma->vm_flags & VM_DENYWRITE)
vma              7467 kernel/events/core.c 	if (vma->vm_flags & VM_MAYEXEC)
vma              7469 kernel/events/core.c 	if (vma->vm_flags & VM_LOCKED)
vma              7471 kernel/events/core.c 	if (vma->vm_flags & VM_HUGETLB)
vma              7493 kernel/events/core.c 		inode = file_inode(vma->vm_file);
vma              7502 kernel/events/core.c 		if (vma->vm_ops && vma->vm_ops->name) {
vma              7503 kernel/events/core.c 			name = (char *) vma->vm_ops->name(vma);
vma              7508 kernel/events/core.c 		name = (char *)arch_vma_name(vma);
vma              7512 kernel/events/core.c 		if (vma->vm_start <= vma->vm_mm->start_brk &&
vma              7513 kernel/events/core.c 				vma->vm_end >= vma->vm_mm->brk) {
vma              7517 kernel/events/core.c 		if (vma->vm_start <= vma->vm_mm->start_stack &&
vma              7518 kernel/events/core.c 				vma->vm_end >= vma->vm_mm->start_stack) {
vma              7549 kernel/events/core.c 	if (!(vma->vm_flags & VM_EXEC))
vma              7585 kernel/events/core.c 					struct vm_area_struct *vma,
vma              7588 kernel/events/core.c 	unsigned long vma_size = vma->vm_end - vma->vm_start;
vma              7589 kernel/events/core.c 	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
vma              7590 kernel/events/core.c 	struct file *file = vma->vm_file;
vma              7596 kernel/events/core.c 		fr->start = vma->vm_start;
vma              7599 kernel/events/core.c 		fr->start = vma->vm_start + filter->offset - off;
vma              7600 kernel/events/core.c 		fr->size = min(vma->vm_end - fr->start, filter->size);
vma              7609 kernel/events/core.c 	struct vm_area_struct *vma = data;
vma              7617 kernel/events/core.c 	if (!vma->vm_file)
vma              7622 kernel/events/core.c 		if (perf_addr_filter_vma_adjust(filter, vma,
vma              7640 kernel/events/core.c static void perf_addr_filters_adjust(struct vm_area_struct *vma)
vma              7649 kernel/events/core.c 	if (!(vma->vm_flags & VM_EXEC))
vma              7658 kernel/events/core.c 		perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true);
vma              7663 kernel/events/core.c void perf_event_mmap(struct vm_area_struct *vma)
vma              7671 kernel/events/core.c 		.vma	= vma,
vma              7682 kernel/events/core.c 			.start  = vma->vm_start,
vma              7683 kernel/events/core.c 			.len    = vma->vm_end - vma->vm_start,
vma              7684 kernel/events/core.c 			.pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
vma              7694 kernel/events/core.c 	perf_addr_filters_adjust(vma);
vma              9218 kernel/events/core.c 	struct vm_area_struct *vma;
vma              9220 kernel/events/core.c 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma              9221 kernel/events/core.c 		if (!vma->vm_file)
vma              9224 kernel/events/core.c 		if (perf_addr_filter_vma_adjust(filter, vma, fr))
vma               121 kernel/events/uprobes.c static bool valid_vma(struct vm_area_struct *vma, bool is_register)
vma               128 kernel/events/uprobes.c 	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
vma               131 kernel/events/uprobes.c static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
vma               133 kernel/events/uprobes.c 	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
vma               136 kernel/events/uprobes.c static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
vma               138 kernel/events/uprobes.c 	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
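The two helpers above are exact inverses over a VMA's file mapping, so offset_to_vaddr(vma, vaddr_to_offset(vma, vaddr)) == vaddr; uprobes uses them to translate a probe's inode offset into the address it occupies in each mm that maps the file. Sketch:

        unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
        loff_t offset = vaddr_to_offset(vma, vaddr);    /* == uprobe->offset */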
vma               154 kernel/events/uprobes.c static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
vma               157 kernel/events/uprobes.c 	struct mm_struct *mm = vma->vm_mm;
vma               160 kernel/events/uprobes.c 		.vma = vma,
vma               167 kernel/events/uprobes.c 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
vma               171 kernel/events/uprobes.c 		err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
vma               191 kernel/events/uprobes.c 		page_add_new_anon_rmap(new_page, vma, addr, false);
vma               193 kernel/events/uprobes.c 		lru_cache_add_active_or_unevictable(new_page, vma);
vma               203 kernel/events/uprobes.c 	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
vma               204 kernel/events/uprobes.c 	ptep_clear_flush_notify(vma, addr, pvmw.pte);
vma               207 kernel/events/uprobes.c 				  mk_pte(new_page, vma->vm_page_prot));
vma               214 kernel/events/uprobes.c 	if (vma->vm_flags & VM_LOCKED)
vma               349 kernel/events/uprobes.c 			      struct vm_area_struct *vma)
vma               351 kernel/events/uprobes.c 	unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);
vma               354 kernel/events/uprobes.c 		vma->vm_file &&
vma               355 kernel/events/uprobes.c 		file_inode(vma->vm_file) == uprobe->inode &&
vma               356 kernel/events/uprobes.c 		(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
vma               357 kernel/events/uprobes.c 		vma->vm_start <= vaddr &&
vma               358 kernel/events/uprobes.c 		vma->vm_end > vaddr;
vma               378 kernel/events/uprobes.c 	struct vm_area_struct *vma;
vma               386 kernel/events/uprobes.c 			FOLL_WRITE, &page, &vma, NULL);
vma               474 kernel/events/uprobes.c 	struct vm_area_struct *vma;
vma               487 kernel/events/uprobes.c 				    &old_page, &vma, NULL);
vma               514 kernel/events/uprobes.c 	ret = anon_vma_prepare(vma);
vma               519 kernel/events/uprobes.c 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
vma               533 kernel/events/uprobes.c 		index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
vma               534 kernel/events/uprobes.c 		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
vma               551 kernel/events/uprobes.c 	ret = __replace_page(vma, vaddr, old_page, new_page);
vma               904 kernel/events/uprobes.c 			struct vm_area_struct *vma, unsigned long vaddr)
vma               909 kernel/events/uprobes.c 	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
vma               975 kernel/events/uprobes.c 	struct vm_area_struct *vma;
vma               983 kernel/events/uprobes.c 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
vma               984 kernel/events/uprobes.c 		if (!valid_vma(vma, is_register))
vma              1002 kernel/events/uprobes.c 		if (!mmget_not_zero(vma->vm_mm))
vma              1010 kernel/events/uprobes.c 		info->mm = vma->vm_mm;
vma              1011 kernel/events/uprobes.c 		info->vaddr = offset_to_vaddr(vma, offset);
vma              1058 kernel/events/uprobes.c 		struct vm_area_struct *vma;
vma              1064 kernel/events/uprobes.c 		vma = find_vma(mm, info->vaddr);
vma              1065 kernel/events/uprobes.c 		if (!vma || !valid_vma(vma, is_register) ||
vma              1066 kernel/events/uprobes.c 		    file_inode(vma->vm_file) != uprobe->inode)
vma              1069 kernel/events/uprobes.c 		if (vma->vm_start > info->vaddr ||
vma              1070 kernel/events/uprobes.c 		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
vma              1077 kernel/events/uprobes.c 				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
vma              1246 kernel/events/uprobes.c 	struct vm_area_struct *vma;
vma              1250 kernel/events/uprobes.c 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma              1254 kernel/events/uprobes.c 		if (!valid_vma(vma, false) ||
vma              1255 kernel/events/uprobes.c 		    file_inode(vma->vm_file) != uprobe->inode)
vma              1258 kernel/events/uprobes.c 		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
vma              1260 kernel/events/uprobes.c 		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
vma              1263 kernel/events/uprobes.c 		vaddr = offset_to_vaddr(vma, uprobe->offset);
vma              1300 kernel/events/uprobes.c 				struct vm_area_struct *vma,
vma              1309 kernel/events/uprobes.c 	min = vaddr_to_offset(vma, start);
vma              1334 kernel/events/uprobes.c static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
vma              1345 kernel/events/uprobes.c 		if (du->mm != vma->vm_mm ||
vma              1346 kernel/events/uprobes.c 		    !valid_ref_ctr_vma(du->uprobe, vma))
vma              1349 kernel/events/uprobes.c 		vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
vma              1350 kernel/events/uprobes.c 		ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
vma              1352 kernel/events/uprobes.c 			update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
vma              1368 kernel/events/uprobes.c int uprobe_mmap(struct vm_area_struct *vma)
vma              1377 kernel/events/uprobes.c 	if (vma->vm_file &&
vma              1378 kernel/events/uprobes.c 	    (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
vma              1379 kernel/events/uprobes.c 	    test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
vma              1380 kernel/events/uprobes.c 		delayed_ref_ctr_inc(vma);
vma              1382 kernel/events/uprobes.c 	if (!valid_vma(vma, true))
vma              1385 kernel/events/uprobes.c 	inode = file_inode(vma->vm_file);
vma              1390 kernel/events/uprobes.c 	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
vma              1398 kernel/events/uprobes.c 		    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
vma              1399 kernel/events/uprobes.c 			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
vma              1400 kernel/events/uprobes.c 			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
vma              1410 kernel/events/uprobes.c vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
vma              1416 kernel/events/uprobes.c 	inode = file_inode(vma->vm_file);
vma              1418 kernel/events/uprobes.c 	min = vaddr_to_offset(vma, start);
vma              1431 kernel/events/uprobes.c void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
vma              1433 kernel/events/uprobes.c 	if (no_uprobe_events() || !valid_vma(vma, false))
vma              1436 kernel/events/uprobes.c 	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
vma              1439 kernel/events/uprobes.c 	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
vma              1440 kernel/events/uprobes.c 	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
vma              1443 kernel/events/uprobes.c 	if (vma_has_uprobes(vma, start, end))
vma              1444 kernel/events/uprobes.c 		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
vma              1450 kernel/events/uprobes.c 	struct vm_area_struct *vma;
vma              1471 kernel/events/uprobes.c 	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
vma              1474 kernel/events/uprobes.c 	if (IS_ERR(vma)) {
vma              1475 kernel/events/uprobes.c 		ret = PTR_ERR(vma);
vma              1998 kernel/events/uprobes.c 	struct vm_area_struct *vma;
vma              2000 kernel/events/uprobes.c 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma              2001 kernel/events/uprobes.c 		if (!valid_vma(vma, false))
vma              2009 kernel/events/uprobes.c 		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
vma              2054 kernel/events/uprobes.c 	struct vm_area_struct *vma;
vma              2057 kernel/events/uprobes.c 	vma = find_vma(mm, bp_vaddr);
vma              2058 kernel/events/uprobes.c 	if (vma && vma->vm_start <= bp_vaddr) {
vma              2059 kernel/events/uprobes.c 		if (valid_vma(vma, false)) {
vma              2060 kernel/events/uprobes.c 			struct inode *inode = file_inode(vma->vm_file);
vma              2061 kernel/events/uprobes.c 			loff_t offset = vaddr_to_offset(vma, bp_vaddr);
vma               346 kernel/fork.c  	struct vm_area_struct *vma;
vma               348 kernel/fork.c  	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
vma               349 kernel/fork.c  	if (vma)
vma               350 kernel/fork.c  		vma_init(vma, mm);
vma               351 kernel/fork.c  	return vma;
vma               365 kernel/fork.c  void vm_area_free(struct vm_area_struct *vma)
vma               367 kernel/fork.c  	kmem_cache_free(vm_area_cachep, vma);
vma               272 kernel/kcov.c  static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
vma               276 kernel/kcov.c  	struct kcov *kcov = vma->vm_file->private_data;
vma               280 kernel/kcov.c  	area = vmalloc_user(vma->vm_end - vma->vm_start);
vma               286 kernel/kcov.c  	if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
vma               287 kernel/kcov.c  	    vma->vm_end - vma->vm_start != size) {
vma               293 kernel/kcov.c  		vma->vm_flags |= VM_DONTEXPAND;
vma               297 kernel/kcov.c  			if (vm_insert_page(vma, vma->vm_start + off, page))
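kcov_mmap() above vmalloc()s the coverage buffer and inserts it into the caller's VMA one page at a time with vm_insert_page(), after checking that vm_pgoff is 0 and that the mapping length matches the size fixed at init time. The matching userspace side, closely following the example in Documentation/dev-tools/kcov.rst (ioctl encodings as in the kcov UAPI; COVER_SIZE is an arbitrary choice):

#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#define KCOV_INIT_TRACE	_IOR('c', 1, unsigned long)
#define KCOV_ENABLE	_IO('c', 100)
#define KCOV_DISABLE	_IO('c', 101)
#define COVER_SIZE	(64 << 10)	/* buffer size, in unsigned longs */

int main(void)
{
	unsigned long *cover, n, i;
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);

	if (fd == -1)
		return 1;
	if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))	/* fixes the size kcov_mmap() checks */
		return 1;
	cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (cover == MAP_FAILED)
		return 1;
	if (ioctl(fd, KCOV_ENABLE, 0))			/* 0 == KCOV_TRACE_PC */
		return 1;
	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
	read(-1, NULL, 0);				/* any traced syscall */
	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	for (i = 0; i < n; i++)
		printf("0x%lx\n", cover[i + 1]);
	ioctl(fd, KCOV_DISABLE, 0);
	munmap(cover, COVER_SIZE * sizeof(unsigned long));
	close(fd);
	return 0;
}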
vma                33 kernel/relay.c static void relay_file_mmap_close(struct vm_area_struct *vma)
vma                35 kernel/relay.c 	struct rchan_buf *buf = vma->vm_private_data;
vma                36 kernel/relay.c 	buf->chan->cb->buf_unmapped(buf, vma->vm_file);
vma                45 kernel/relay.c 	struct rchan_buf *buf = vmf->vma->vm_private_data;
vma                96 kernel/relay.c static int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
vma                98 kernel/relay.c 	unsigned long length = vma->vm_end - vma->vm_start;
vma                99 kernel/relay.c 	struct file *filp = vma->vm_file;
vma               107 kernel/relay.c 	vma->vm_ops = &relay_file_mmap_ops;
vma               108 kernel/relay.c 	vma->vm_flags |= VM_DONTEXPAND;
vma               109 kernel/relay.c 	vma->vm_private_data = buf;
vma               916 kernel/relay.c static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma)
vma               919 kernel/relay.c 	return relay_mmap_buf(buf, vma);
vma              2490 kernel/sched/fair.c 	struct vm_area_struct *vma;
vma              2546 kernel/sched/fair.c 	vma = find_vma(mm, start);
vma              2547 kernel/sched/fair.c 	if (!vma) {
vma              2550 kernel/sched/fair.c 		vma = mm->mmap;
vma              2552 kernel/sched/fair.c 	for (; vma; vma = vma->vm_next) {
vma              2553 kernel/sched/fair.c 		if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
vma              2554 kernel/sched/fair.c 			is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
vma              2564 kernel/sched/fair.c 		if (!vma->vm_mm ||
vma              2565 kernel/sched/fair.c 		    (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
vma              2572 kernel/sched/fair.c 		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
vma              2576 kernel/sched/fair.c 			start = max(start, vma->vm_start);
vma              2578 kernel/sched/fair.c 			end = min(end, vma->vm_end);
vma              2579 kernel/sched/fair.c 			nr_pte_updates = change_prot_numa(vma, start, end);
vma              2598 kernel/sched/fair.c 		} while (end != vma->vm_end);
vma              2608 kernel/sched/fair.c 	if (vma)
vma              4505 kernel/signal.c __weak const char *arch_vma_name(struct vm_area_struct *vma)
vma              1844 kernel/sys.c   		struct vm_area_struct *vma;
vma              1847 kernel/sys.c   		for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma              1848 kernel/sys.c   			if (!vma->vm_file)
vma              1850 kernel/sys.c   			if (path_equal(&vma->vm_file->f_path,
vma              2093 kernel/sys.c   	struct vm_area_struct *vma;
vma              2126 kernel/sys.c   	vma = find_vma(mm, addr);
vma              2196 kernel/sys.c   		if (!vma) {
vma               379 kernel/trace/trace_output.c 		const struct vm_area_struct *vma;
vma               382 kernel/trace/trace_output.c 		vma = find_vma(mm, ip);
vma               383 kernel/trace/trace_output.c 		if (vma) {
vma               384 kernel/trace/trace_output.c 			file = vma->vm_file;
vma               385 kernel/trace/trace_output.c 			vmstart = vma->vm_start;
vma               120 mm/debug.c     void dump_vma(const struct vm_area_struct *vma)
vma               127 mm/debug.c     		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
vma               128 mm/debug.c     		vma->vm_prev, vma->vm_mm,
vma               129 mm/debug.c     		(unsigned long)pgprot_val(vma->vm_page_prot),
vma               130 mm/debug.c     		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
vma               131 mm/debug.c     		vma->vm_file, vma->vm_private_data,
vma               132 mm/debug.c     		vma->vm_flags, &vma->vm_flags);
vma              2367 mm/filemap.c   				up_read(&vmf->vma->vm_mm->mmap_sem);
vma              2385 mm/filemap.c   	struct file *file = vmf->vma->vm_file;
vma              2392 mm/filemap.c   	if (vmf->vma->vm_flags & VM_RAND_READ)
vma              2397 mm/filemap.c   	if (vmf->vma->vm_flags & VM_SEQ_READ) {
vma              2434 mm/filemap.c   	struct file *file = vmf->vma->vm_file;
vma              2441 mm/filemap.c   	if (vmf->vma->vm_flags & VM_RAND_READ)
vma              2479 mm/filemap.c   	struct file *file = vmf->vma->vm_file;
vma              2506 mm/filemap.c   		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
vma              2605 mm/filemap.c   	struct file *file = vmf->vma->vm_file;
vma              2674 mm/filemap.c   	struct inode *inode = file_inode(vmf->vma->vm_file);
vma              2678 mm/filemap.c   	file_update_time(vmf->vma->vm_file);
vma              2705 mm/filemap.c   int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
vma              2712 mm/filemap.c   	vma->vm_ops = &generic_file_vm_ops;
vma              2719 mm/filemap.c   int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
vma              2721 mm/filemap.c   	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
vma              2723 mm/filemap.c   	return generic_file_mmap(file, vma);
vma              2730 mm/filemap.c   int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
vma              2734 mm/filemap.c   int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
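generic_file_mmap() above only installs generic_file_vm_ops, so faults on the mapping are serviced by filemap_fault() and the readahead heuristics earlier in this file; generic_file_readonly_mmap() additionally rejects shared mappings that could produce writeback (VM_SHARED together with VM_MAYWRITE). A small runnable demo that exercises that fault path; the fallback file path is an arbitrary assumption:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/etc/hostname";
	struct stat st;
	int fd = open(path, O_RDONLY);

	if (fd < 0 || fstat(fd, &st) || st.st_size == 0)
		return 1;
	/* the first access below faults through the filesystem's vm_ops
	 * (filemap_fault() for filesystems using generic_file_mmap()) */
	char *p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	printf("first byte: 0x%02x\n", (unsigned char)p[0]);
	munmap(p, st.st_size);
	close(fd);
	return 0;
}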
vma                38 mm/frame_vector.c 	struct vm_area_struct *vma;
vma                53 mm/frame_vector.c 	vma = find_vma_intersection(mm, start, start + 1);
vma                54 mm/frame_vector.c 	if (!vma) {
vma                67 mm/frame_vector.c 	if (vma_is_fsdax(vma)) {
vma                72 mm/frame_vector.c 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
vma                85 mm/frame_vector.c 		while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) {
vma                86 mm/frame_vector.c 			err = follow_pfn(vma, start, &nums[ret]);
vma                99 mm/frame_vector.c 		if (ret >= nr_frames || start < vma->vm_end)
vma               101 mm/frame_vector.c 		vma = find_vma_intersection(mm, start, start + 1);
vma               102 mm/frame_vector.c 	} while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP));
vma               123 mm/gup.c       static struct page *no_page_table(struct vm_area_struct *vma,
vma               134 mm/gup.c       	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
vma               139 mm/gup.c       static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
vma               154 mm/gup.c       			set_pte_at(vma->vm_mm, address, pte, entry);
vma               155 mm/gup.c       			update_mmu_cache(vma, address, pte);
vma               177 mm/gup.c       static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags)
vma               179 mm/gup.c       	return is_cow_mapping(vma->vm_flags) && (flags & FOLL_GET);
vma               182 mm/gup.c       static struct page *follow_page_pte(struct vm_area_struct *vma,
vma               186 mm/gup.c       	struct mm_struct *mm = vma->vm_mm;
vma               193 mm/gup.c       		return no_page_table(vma, flags);
vma               222 mm/gup.c       	page = vm_normal_page(vma, address, pte);
vma               245 mm/gup.c       			ret = follow_pfn_pte(vma, address, ptep, flags);
vma               281 mm/gup.c       	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
vma               314 mm/gup.c       	return no_page_table(vma, flags);
vma               317 mm/gup.c       static struct page *follow_pmd_mask(struct vm_area_struct *vma,
vma               325 mm/gup.c       	struct mm_struct *mm = vma->vm_mm;
vma               334 mm/gup.c       		return no_page_table(vma, flags);
vma               335 mm/gup.c       	if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) {
vma               339 mm/gup.c       		return no_page_table(vma, flags);
vma               342 mm/gup.c       		page = follow_huge_pd(vma, address,
vma               347 mm/gup.c       		return no_page_table(vma, flags);
vma               352 mm/gup.c       			return no_page_table(vma, flags);
vma               363 mm/gup.c       			return no_page_table(vma, flags);
vma               368 mm/gup.c       		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
vma               374 mm/gup.c       		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
vma               377 mm/gup.c       		return no_page_table(vma, flags);
vma               383 mm/gup.c       		return no_page_table(vma, flags);
vma               388 mm/gup.c       			return no_page_table(vma, flags);
vma               394 mm/gup.c       		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
vma               402 mm/gup.c       			split_huge_pmd(vma, pmd, address);
vma               416 mm/gup.c       				return no_page_table(vma, flags);
vma               419 mm/gup.c       			split_huge_pmd(vma, pmd, address);
vma               424 mm/gup.c       			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
vma               426 mm/gup.c       	page = follow_trans_huge_pmd(vma, address, pmd, flags);
vma               432 mm/gup.c       static struct page *follow_pud_mask(struct vm_area_struct *vma,
vma               440 mm/gup.c       	struct mm_struct *mm = vma->vm_mm;
vma               444 mm/gup.c       		return no_page_table(vma, flags);
vma               445 mm/gup.c       	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
vma               449 mm/gup.c       		return no_page_table(vma, flags);
vma               452 mm/gup.c       		page = follow_huge_pd(vma, address,
vma               457 mm/gup.c       		return no_page_table(vma, flags);
vma               461 mm/gup.c       		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
vma               467 mm/gup.c       		return no_page_table(vma, flags);
vma               469 mm/gup.c       	return follow_pmd_mask(vma, address, pud, flags, ctx);
vma               472 mm/gup.c       static struct page *follow_p4d_mask(struct vm_area_struct *vma,
vma               482 mm/gup.c       		return no_page_table(vma, flags);
vma               485 mm/gup.c       		return no_page_table(vma, flags);
vma               488 mm/gup.c       		page = follow_huge_pd(vma, address,
vma               493 mm/gup.c       		return no_page_table(vma, flags);
vma               495 mm/gup.c       	return follow_pud_mask(vma, address, p4d, flags, ctx);
vma               517 mm/gup.c       static struct page *follow_page_mask(struct vm_area_struct *vma,
vma               523 mm/gup.c       	struct mm_struct *mm = vma->vm_mm;
vma               537 mm/gup.c       		return no_page_table(vma, flags);
vma               543 mm/gup.c       		return no_page_table(vma, flags);
vma               546 mm/gup.c       		page = follow_huge_pd(vma, address,
vma               551 mm/gup.c       		return no_page_table(vma, flags);
vma               554 mm/gup.c       	return follow_p4d_mask(vma, address, pgd, flags, ctx);
vma               557 mm/gup.c       struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
vma               563 mm/gup.c       	page = follow_page_mask(vma, address, foll_flags, &ctx);
vma               570 mm/gup.c       		unsigned int gup_flags, struct vm_area_struct **vma,
vma               602 mm/gup.c       	*vma = get_gate_vma(mm);
vma               605 mm/gup.c       	*page = vm_normal_page(*vma, address, *pte);
vma               627 mm/gup.c       static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
vma               649 mm/gup.c       	ret = handle_mm_fault(vma, address, fault_flags);
vma               680 mm/gup.c       	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
vma               685 mm/gup.c       static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
vma               687 mm/gup.c       	vm_flags_t vm_flags = vma->vm_flags;
vma               694 mm/gup.c       	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
vma               727 mm/gup.c       	if (!arch_vma_access_permitted(vma, write, false, foreign))
vma               794 mm/gup.c       	struct vm_area_struct *vma = NULL;
vma               818 mm/gup.c       		if (!vma || start >= vma->vm_end) {
vma               819 mm/gup.c       			vma = find_extend_vma(mm, start);
vma               820 mm/gup.c       			if (!vma && in_gate_area(mm, start)) {
vma               822 mm/gup.c       						gup_flags, &vma,
vma               830 mm/gup.c       			if (!vma || check_vma_flags(vma, gup_flags)) {
vma               834 mm/gup.c       			if (is_vm_hugetlb_page(vma)) {
vma               835 mm/gup.c       				if (should_force_cow_break(vma, foll_flags))
vma               837 mm/gup.c       				i = follow_hugetlb_page(mm, vma, pages, vmas,
vma               844 mm/gup.c       		if (should_force_cow_break(vma, foll_flags))
vma               858 mm/gup.c       		page = follow_page_mask(vma, start, foll_flags, &ctx);
vma               860 mm/gup.c       			ret = faultin_page(tsk, vma, start, &foll_flags,
vma               888 mm/gup.c       			flush_anon_page(vma, page, start);
vma               894 mm/gup.c       			vmas[i] = vma;
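The __get_user_pages() loop above walks the requested range VMA by VMA, faulting pages in through faultin_page() and collecting them with follow_page_mask(). One userspace syscall that ends up in this code is process_vm_readv(), which pins the remote pages via get_user_pages_remote(); a runnable sketch that reads the caller's own memory:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char src[] = "pinned via gup";
	char dst[sizeof(src)];
	struct iovec local = { dst, sizeof(dst) };
	struct iovec remote = { src, sizeof(src) };

	/* pins the pages backing 'src' through the gup path above,
	 * then copies out of them */
	if (process_vm_readv(getpid(), &local, 1, &remote, 1, 0) < 0) {
		perror("process_vm_readv");
		return 1;
	}
	printf("%s\n", dst);
	return 0;
}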
vma               910 mm/gup.c       static bool vma_permits_fault(struct vm_area_struct *vma,
vma               917 mm/gup.c       	if (!(vm_flags & vma->vm_flags))
vma               927 mm/gup.c       	if (!arch_vma_access_permitted(vma, write, false, foreign))
vma               967 mm/gup.c       	struct vm_area_struct *vma;
vma               976 mm/gup.c       	vma = find_extend_vma(mm, address);
vma               977 mm/gup.c       	if (!vma || address < vma->vm_start)
vma               980 mm/gup.c       	if (!vma_permits_fault(vma, fault_flags))
vma               983 mm/gup.c       	ret = handle_mm_fault(vma, address, fault_flags);
vma              1203 mm/gup.c       long populate_vma_page_range(struct vm_area_struct *vma,
vma              1206 mm/gup.c       	struct mm_struct *mm = vma->vm_mm;
vma              1212 mm/gup.c       	VM_BUG_ON_VMA(start < vma->vm_start, vma);
vma              1213 mm/gup.c       	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
vma              1217 mm/gup.c       	if (vma->vm_flags & VM_LOCKONFAULT)
vma              1224 mm/gup.c       	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
vma              1231 mm/gup.c       	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
vma              1253 mm/gup.c       	struct vm_area_struct *vma = NULL;
vma              1267 mm/gup.c       			vma = find_vma(mm, nstart);
vma              1268 mm/gup.c       		} else if (nstart >= vma->vm_end)
vma              1269 mm/gup.c       			vma = vma->vm_next;
vma              1270 mm/gup.c       		if (!vma || vma->vm_start >= end)
vma              1276 mm/gup.c       		nend = min(end, vma->vm_end);
vma              1277 mm/gup.c       		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
vma              1279 mm/gup.c       		if (nstart < vma->vm_start)
vma              1280 mm/gup.c       			nstart = vma->vm_start;
vma              1286 mm/gup.c       		ret = populate_vma_page_range(vma, nstart, nend, &locked);
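populate_vma_page_range() and the __mm_populate() loop above implement the prefaulting behind mlock() and MAP_POPULATE: each VMA in the range is clamped to [nstart, nend) and fed to the gup machinery so its pages exist (and, for VM_LOCKED, are mlocked) before the syscall returns. A runnable check that MAP_POPULATE really left the pages resident, using mincore():

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	size_t len = 8 * psz;
	unsigned char vec[8];

	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
	if (p == MAP_FAILED || mincore(p, len, vec))
		return 1;
	for (int i = 0; i < 8; i++)	/* expect "1" for every page */
		printf("page %d resident: %d\n", i, vec[i] & 1);
	munmap(p, len);
	return 0;
}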
vma              1319 mm/gup.c       	struct vm_area_struct *vma;
vma              1323 mm/gup.c       			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
vma              1326 mm/gup.c       	flush_cache_page(vma, addr, page_to_pfn(page));
vma              1337 mm/gup.c       	struct vm_area_struct *vma;
vma              1350 mm/gup.c       		vma = find_vma(mm, start);
vma              1351 mm/gup.c       		if (!vma)
vma              1355 mm/gup.c       		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
vma              1356 mm/gup.c       		    !(vm_flags & vma->vm_flags))
vma              1365 mm/gup.c       			vmas[i] = vma;
vma              1383 mm/gup.c       		struct vm_area_struct *vma = vmas[i];
vma              1385 mm/gup.c       		if (vma == vma_prev)
vma              1388 mm/gup.c       		vma_prev = vma;
vma              1390 mm/gup.c       		if (vma_is_fsdax(vma))
vma               229 mm/hmm.c       	struct vm_area_struct *vma = walk->vma;
vma               232 mm/hmm.c       	if (!vma)
vma               240 mm/hmm.c       	ret = handle_mm_fault(vma, addr, flags);
vma               295 mm/hmm.c       	if (write_fault && walk->vma && !(walk->vma->vm_flags & VM_WRITE))
vma               707 mm/hmm.c       	split_huge_pud(walk->vma, pudp, addr);
vma               733 mm/hmm.c       	struct vm_area_struct *vma = walk->vma;
vma               740 mm/hmm.c       	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
vma               896 mm/hmm.c       	struct vm_area_struct *vma;
vma               906 mm/hmm.c       		vma = find_vma(hmm->mmu_notifier.mm, start);
vma               907 mm/hmm.c       		if (vma == NULL || (vma->vm_flags & device_vma))
vma               910 mm/hmm.c       		if (!(vma->vm_flags & VM_READ)) {
vma               925 mm/hmm.c       		end = min(range->end, vma->vm_end);
vma               927 mm/hmm.c       		walk_page_range(vma->vm_mm, start, end, &hmm_walk_ops,
vma               931 mm/hmm.c       			ret = walk_page_range(vma->vm_mm, start, end,
vma                65 mm/huge_memory.c bool transparent_hugepage_enabled(struct vm_area_struct *vma)
vma                68 mm/huge_memory.c 	unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE;
vma                70 mm/huge_memory.c 	if (!transhuge_vma_suitable(vma, addr))
vma                72 mm/huge_memory.c 	if (vma_is_anonymous(vma))
vma                73 mm/huge_memory.c 		return __transparent_hugepage_enabled(vma);
vma                74 mm/huge_memory.c 	if (vma_is_shmem(vma))
vma                75 mm/huge_memory.c 		return shmem_huge_enabled(vma);
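transparent_hugepage_enabled() above decides per VMA whether THP applies: the range must be able to hold an aligned PMD-sized page (transhuge_vma_suitable()) and the VMA must be anonymous, or shmem with huge pages enabled. From userspace, madvise(MADV_HUGEPAGE) sets the VM_HUGEPAGE flag these checks consult; a minimal sketch, assuming 2 MiB PMDs:

#include <stdio.h>
#include <sys/mman.h>

#define LEN (4UL << 20)	/* 4 MiB: spans at least one aligned 2 MiB PMD */

int main(void)
{
	char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	if (madvise(p, LEN, MADV_HUGEPAGE))	/* sets VM_HUGEPAGE on the VMA */
		perror("madvise");
	p[0] = 1;	/* first touch may take the huge-pmd fault path */
	printf("touched %p\n", (void *)p);
	munmap(p, LEN);
	return 0;
}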
vma               484 mm/huge_memory.c pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
vma               486 mm/huge_memory.c 	if (likely(vma->vm_flags & VM_WRITE))
vma               578 mm/huge_memory.c 	struct vm_area_struct *vma = vmf->vma;
vma               586 mm/huge_memory.c 	if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) {
vma               592 mm/huge_memory.c 	pgtable = pte_alloc_one(vma->vm_mm);
vma               606 mm/huge_memory.c 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
vma               612 mm/huge_memory.c 		ret = check_stable_address_space(vma->vm_mm);
vma               617 mm/huge_memory.c 		if (userfaultfd_missing(vma)) {
vma               623 mm/huge_memory.c 			pte_free(vma->vm_mm, pgtable);
vma               629 mm/huge_memory.c 		entry = mk_huge_pmd(page, vma->vm_page_prot);
vma               630 mm/huge_memory.c 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
vma               631 mm/huge_memory.c 		page_add_new_anon_rmap(page, vma, haddr, true);
vma               633 mm/huge_memory.c 		lru_cache_add_active_or_unevictable(page, vma);
vma               634 mm/huge_memory.c 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
vma               635 mm/huge_memory.c 		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
vma               636 mm/huge_memory.c 		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
vma               637 mm/huge_memory.c 		mm_inc_nr_ptes(vma->vm_mm);
vma               648 mm/huge_memory.c 		pte_free(vma->vm_mm, pgtable);
vma               664 mm/huge_memory.c static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
vma               666 mm/huge_memory.c 	const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
vma               692 mm/huge_memory.c 		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
vma               698 mm/huge_memory.c 	entry = mk_pmd(zero_page, vma->vm_page_prot);
vma               709 mm/huge_memory.c 	struct vm_area_struct *vma = vmf->vma;
vma               714 mm/huge_memory.c 	if (!transhuge_vma_suitable(vma, haddr))
vma               716 mm/huge_memory.c 	if (unlikely(anon_vma_prepare(vma)))
vma               718 mm/huge_memory.c 	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
vma               721 mm/huge_memory.c 			!mm_forbids_zeropage(vma->vm_mm) &&
vma               727 mm/huge_memory.c 		pgtable = pte_alloc_one(vma->vm_mm);
vma               730 mm/huge_memory.c 		zero_page = mm_get_huge_zero_page(vma->vm_mm);
vma               732 mm/huge_memory.c 			pte_free(vma->vm_mm, pgtable);
vma               736 mm/huge_memory.c 		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
vma               740 mm/huge_memory.c 			ret = check_stable_address_space(vma->vm_mm);
vma               743 mm/huge_memory.c 			} else if (userfaultfd_missing(vma)) {
vma               748 mm/huge_memory.c 				set_huge_zero_page(pgtable, vma->vm_mm, vma,
vma               756 mm/huge_memory.c 			pte_free(vma->vm_mm, pgtable);
vma               759 mm/huge_memory.c 	gfp = alloc_hugepage_direct_gfpmask(vma);
vma               760 mm/huge_memory.c 	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
vma               769 mm/huge_memory.c static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
vma               773 mm/huge_memory.c 	struct mm_struct *mm = vma->vm_mm;
vma               785 mm/huge_memory.c 			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
vma               786 mm/huge_memory.c 			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
vma               787 mm/huge_memory.c 				update_mmu_cache_pmd(vma, addr, pmd);
vma               798 mm/huge_memory.c 		entry = maybe_pmd_mkwrite(entry, vma);
vma               808 mm/huge_memory.c 	update_mmu_cache_pmd(vma, addr, pmd);
vma               819 mm/huge_memory.c 	struct vm_area_struct *vma = vmf->vma;
vma               820 mm/huge_memory.c 	pgprot_t pgprot = vma->vm_page_prot;
vma               828 mm/huge_memory.c 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
vma               830 mm/huge_memory.c 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
vma               832 mm/huge_memory.c 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
vma               834 mm/huge_memory.c 	if (addr < vma->vm_start || addr >= vma->vm_end)
vma               838 mm/huge_memory.c 		pgtable = pte_alloc_one(vma->vm_mm);
vma               843 mm/huge_memory.c 	track_pfn_insert(vma, &pgprot, pfn);
vma               845 mm/huge_memory.c 	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
vma               851 mm/huge_memory.c static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
vma               853 mm/huge_memory.c 	if (likely(vma->vm_flags & VM_WRITE))
vma               858 mm/huge_memory.c static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
vma               861 mm/huge_memory.c 	struct mm_struct *mm = vma->vm_mm;
vma               873 mm/huge_memory.c 			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
vma               874 mm/huge_memory.c 			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
vma               875 mm/huge_memory.c 				update_mmu_cache_pud(vma, addr, pud);
vma               885 mm/huge_memory.c 		entry = maybe_pud_mkwrite(entry, vma);
vma               888 mm/huge_memory.c 	update_mmu_cache_pud(vma, addr, pud);
vma               897 mm/huge_memory.c 	struct vm_area_struct *vma = vmf->vma;
vma               898 mm/huge_memory.c 	pgprot_t pgprot = vma->vm_page_prot;
vma               905 mm/huge_memory.c 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
vma               907 mm/huge_memory.c 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
vma               909 mm/huge_memory.c 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
vma               911 mm/huge_memory.c 	if (addr < vma->vm_start || addr >= vma->vm_end)
vma               914 mm/huge_memory.c 	track_pfn_insert(vma, &pgprot, pfn);
vma               916 mm/huge_memory.c 	insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
vma               922 mm/huge_memory.c static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
vma               930 mm/huge_memory.c 	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
vma               932 mm/huge_memory.c 		update_mmu_cache_pmd(vma, addr, pmd);
vma               935 mm/huge_memory.c struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
vma               939 mm/huge_memory.c 	struct mm_struct *mm = vma->vm_mm;
vma               959 mm/huge_memory.c 		touch_pmd(vma, addr, pmd, flags);
vma               980 mm/huge_memory.c 		  struct vm_area_struct *vma)
vma               989 mm/huge_memory.c 	if (!vma_is_anonymous(vma))
vma              1041 mm/huge_memory.c 		set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
vma              1068 mm/huge_memory.c static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
vma              1076 mm/huge_memory.c 	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
vma              1078 mm/huge_memory.c 		update_mmu_cache_pud(vma, addr, pud);
vma              1081 mm/huge_memory.c struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
vma              1085 mm/huge_memory.c 	struct mm_struct *mm = vma->vm_mm;
vma              1099 mm/huge_memory.c 		touch_pud(vma, addr, pud, flags);
vma              1120 mm/huge_memory.c 		  struct vm_area_struct *vma)
vma              1161 mm/huge_memory.c 	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
vma              1169 mm/huge_memory.c 	if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write))
vma              1170 mm/huge_memory.c 		update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud);
vma              1183 mm/huge_memory.c 	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
vma              1191 mm/huge_memory.c 	if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
vma              1192 mm/huge_memory.c 		update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
vma              1201 mm/huge_memory.c 	struct vm_area_struct *vma = vmf->vma;
vma              1219 mm/huge_memory.c 		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma,
vma              1222 mm/huge_memory.c 			     mem_cgroup_try_charge_delay(pages[i], vma->vm_mm,
vma              1242 mm/huge_memory.c 				   haddr + PAGE_SIZE * i, vma);
vma              1247 mm/huge_memory.c 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
vma              1251 mm/huge_memory.c 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
vma              1264 mm/huge_memory.c 	pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
vma              1266 mm/huge_memory.c 	pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd);
vma              1267 mm/huge_memory.c 	pmd_populate(vma->vm_mm, &_pmd, pgtable);
vma              1271 mm/huge_memory.c 		entry = mk_pte(pages[i], vma->vm_page_prot);
vma              1272 mm/huge_memory.c 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
vma              1275 mm/huge_memory.c 		page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false);
vma              1277 mm/huge_memory.c 		lru_cache_add_active_or_unevictable(pages[i], vma);
vma              1280 mm/huge_memory.c 		set_pte_at(vma->vm_mm, haddr, vmf->pte, entry);
vma              1286 mm/huge_memory.c 	pmd_populate(vma->vm_mm, vmf->pmd, pgtable);
vma              1317 mm/huge_memory.c 	struct vm_area_struct *vma = vmf->vma;
vma              1325 mm/huge_memory.c 	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
vma              1326 mm/huge_memory.c 	VM_BUG_ON_VMA(!vma->anon_vma, vma);
vma              1354 mm/huge_memory.c 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
vma              1355 mm/huge_memory.c 		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry,  1))
vma              1356 mm/huge_memory.c 			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
vma              1365 mm/huge_memory.c 	if (__transparent_hugepage_enabled(vma) &&
vma              1367 mm/huge_memory.c 		huge_gfp = alloc_hugepage_direct_gfpmask(vma);
vma              1368 mm/huge_memory.c 		new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
vma              1376 mm/huge_memory.c 			split_huge_pmd(vma, vmf->pmd, vmf->address);
vma              1381 mm/huge_memory.c 				split_huge_pmd(vma, vmf->pmd, vmf->address);
vma              1390 mm/huge_memory.c 	if (unlikely(mem_cgroup_try_charge_delay(new_page, vma->vm_mm,
vma              1393 mm/huge_memory.c 		split_huge_pmd(vma, vmf->pmd, vmf->address);
vma              1408 mm/huge_memory.c 				    vma, HPAGE_PMD_NR);
vma              1411 mm/huge_memory.c 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
vma              1425 mm/huge_memory.c 		entry = mk_huge_pmd(new_page, vma->vm_page_prot);
vma              1426 mm/huge_memory.c 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
vma              1427 mm/huge_memory.c 		pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
vma              1428 mm/huge_memory.c 		page_add_new_anon_rmap(new_page, vma, haddr, true);
vma              1430 mm/huge_memory.c 		lru_cache_add_active_or_unevictable(new_page, vma);
vma              1431 mm/huge_memory.c 		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
vma              1432 mm/huge_memory.c 		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
vma              1434 mm/huge_memory.c 			add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
vma              1465 mm/huge_memory.c struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
vma              1470 mm/huge_memory.c 	struct mm_struct *mm = vma->vm_mm;
vma              1489 mm/huge_memory.c 		touch_pmd(vma, addr, pmd, flags);
vma              1490 mm/huge_memory.c 	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
vma              1536 mm/huge_memory.c 	struct vm_area_struct *vma = vmf->vma;
vma              1547 mm/huge_memory.c 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
vma              1584 mm/huge_memory.c 	target_nid = mpol_misplaced(page, vma, haddr);
vma              1636 mm/huge_memory.c 	if (mm_tlb_flush_pending(vma->vm_mm)) {
vma              1637 mm/huge_memory.c 		flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
vma              1647 mm/huge_memory.c 		mmu_notifier_invalidate_range(vma->vm_mm, haddr,
vma              1657 mm/huge_memory.c 	migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
vma              1669 mm/huge_memory.c 	pmd = pmd_modify(pmd, vma->vm_page_prot);
vma              1673 mm/huge_memory.c 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
vma              1674 mm/huge_memory.c 	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
vma              1694 mm/huge_memory.c bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
vma              1705 mm/huge_memory.c 	ptl = pmd_trans_huge_lock(pmd, vma);
vma              1748 mm/huge_memory.c 		pmdp_invalidate(vma, addr, pmd);
vma              1773 mm/huge_memory.c int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
vma              1781 mm/huge_memory.c 	ptl = __pmd_trans_huge_lock(pmd, vma);
vma              1793 mm/huge_memory.c 	if (vma_is_dax(vma)) {
vma              1841 mm/huge_memory.c 					 struct vm_area_struct *vma)
vma              1849 mm/huge_memory.c 	return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
vma              1864 mm/huge_memory.c bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
vma              1870 mm/huge_memory.c 	struct mm_struct *mm = vma->vm_mm;
vma              1891 mm/huge_memory.c 	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
vma              1901 mm/huge_memory.c 		if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
vma              1909 mm/huge_memory.c 			flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
vma              1924 mm/huge_memory.c int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
vma              1927 mm/huge_memory.c 	struct mm_struct *mm = vma->vm_mm;
vma              1933 mm/huge_memory.c 	ptl = __pmd_trans_huge_lock(pmd, vma);
vma              1993 mm/huge_memory.c 	entry = pmdp_invalidate(vma, addr, pmd);
vma              2000 mm/huge_memory.c 	BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
vma              2012 mm/huge_memory.c spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
vma              2015 mm/huge_memory.c 	ptl = pmd_lock(vma->vm_mm, pmd);
vma              2029 mm/huge_memory.c spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
vma              2033 mm/huge_memory.c 	ptl = pud_lock(vma->vm_mm, pud);
vma              2041 mm/huge_memory.c int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
vma              2046 mm/huge_memory.c 	ptl = __pud_trans_huge_lock(pud, vma);
vma              2057 mm/huge_memory.c 	if (vma_is_dax(vma)) {
vma              2067 mm/huge_memory.c static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
vma              2071 mm/huge_memory.c 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
vma              2072 mm/huge_memory.c 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
vma              2077 mm/huge_memory.c 	pudp_huge_clear_flush_notify(vma, haddr, pud);
vma              2080 mm/huge_memory.c void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
vma              2086 mm/huge_memory.c 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
vma              2090 mm/huge_memory.c 	ptl = pud_lock(vma->vm_mm, pud);
vma              2093 mm/huge_memory.c 	__split_huge_pud_locked(vma, pud, range.start);
vma              2105 mm/huge_memory.c static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
vma              2108 mm/huge_memory.c 	struct mm_struct *mm = vma->vm_mm;
vma              2121 mm/huge_memory.c 	pmdp_huge_clear_flush(vma, haddr, pmd);
vma              2128 mm/huge_memory.c 		entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
vma              2139 mm/huge_memory.c static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
vma              2142 mm/huge_memory.c 	struct mm_struct *mm = vma->vm_mm;
vma              2151 mm/huge_memory.c 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
vma              2152 mm/huge_memory.c 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
vma              2158 mm/huge_memory.c 	if (!vma_is_anonymous(vma)) {
vma              2159 mm/huge_memory.c 		_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
vma              2166 mm/huge_memory.c 		if (vma_is_dax(vma))
vma              2187 mm/huge_memory.c 		return __split_huge_zero_page_pmd(vma, haddr, pmd);
vma              2210 mm/huge_memory.c 	old_pmd = pmdp_invalidate(vma, haddr, pmd);
vma              2253 mm/huge_memory.c 			entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
vma              2254 mm/huge_memory.c 			entry = maybe_mkwrite(entry, vma);
vma              2299 mm/huge_memory.c void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
vma              2305 mm/huge_memory.c 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
vma              2309 mm/huge_memory.c 	ptl = pmd_lock(vma->vm_mm, pmd);
vma              2325 mm/huge_memory.c 	__split_huge_pmd_locked(vma, pmd, range.start, freeze);
vma              2344 mm/huge_memory.c void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
vma              2352 mm/huge_memory.c 	pgd = pgd_offset(vma->vm_mm, address);
vma              2366 mm/huge_memory.c 	__split_huge_pmd(vma, pmd, address, freeze, page);
vma              2369 mm/huge_memory.c void vma_adjust_trans_huge(struct vm_area_struct *vma,
vma              2380 mm/huge_memory.c 	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
vma              2381 mm/huge_memory.c 	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
vma              2382 mm/huge_memory.c 		split_huge_pmd_address(vma, start, false, NULL);
vma              2390 mm/huge_memory.c 	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
vma              2391 mm/huge_memory.c 	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
vma              2392 mm/huge_memory.c 		split_huge_pmd_address(vma, end, false, NULL);
vma              2400 mm/huge_memory.c 		struct vm_area_struct *next = vma->vm_next;
vma              3023 mm/huge_memory.c 	struct vm_area_struct *vma = pvmw->vma;
vma              3024 mm/huge_memory.c 	struct mm_struct *mm = vma->vm_mm;
vma              3033 mm/huge_memory.c 	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
vma              3034 mm/huge_memory.c 	pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
vma              3048 mm/huge_memory.c 	struct vm_area_struct *vma = pvmw->vma;
vma              3049 mm/huge_memory.c 	struct mm_struct *mm = vma->vm_mm;
vma              3060 mm/huge_memory.c 	pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
vma              3064 mm/huge_memory.c 		pmde = maybe_pmd_mkwrite(pmde, vma);
vma              3066 mm/huge_memory.c 	flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
vma              3068 mm/huge_memory.c 		page_add_anon_rmap(new, vma, mmun_start, true);
vma              3072 mm/huge_memory.c 	if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
vma              3074 mm/huge_memory.c 	update_mmu_cache_pmd(vma, address, pvmw->pmd);
vma               218 mm/hugetlb.c   static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
vma               220 mm/hugetlb.c   	return subpool_inode(file_inode(vma->vm_file));
vma               624 mm/hugetlb.c   			struct vm_area_struct *vma, unsigned long address)
vma               626 mm/hugetlb.c   	return ((address - vma->vm_start) >> huge_page_shift(h)) +
vma               627 mm/hugetlb.c   			(vma->vm_pgoff >> huge_page_order(h));
vma               630 mm/hugetlb.c   pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
vma               633 mm/hugetlb.c   	return vma_hugecache_offset(hstate_vma(vma), vma, address);
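vma_hugecache_offset() above is the huge-page analogue of the uprobes helpers earlier: it converts a user address into an index in the file's huge-page cache, scaling both the distance from vm_start and vm_pgoff (which stays in PAGE_SIZE units) down to huge-page units. A worked userspace sketch, assuming 2 MiB huge pages over 4 KiB base pages:

#include <stdio.h>

#define DEMO_HPAGE_SHIFT 21	/* 2 MiB huge pages (assumption) */
#define DEMO_PAGE_SHIFT  12	/* 4 KiB base pages */

int main(void)
{
	unsigned long vm_start = 0x40000000;
	unsigned long vm_pgoff = 512;		/* 512 * 4 KiB = 1 huge page into the file */
	unsigned long address  = 0x40400000;	/* vm_start + 2 huge pages */

	unsigned long idx = ((address - vm_start) >> DEMO_HPAGE_SHIFT) +
			    (vm_pgoff >> (DEMO_HPAGE_SHIFT - DEMO_PAGE_SHIFT));
	printf("huge page cache index = %lu\n", idx);	/* 2 + 1 = 3 */
	return 0;
}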
vma               641 mm/hugetlb.c   unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
vma               643 mm/hugetlb.c   	if (vma->vm_ops && vma->vm_ops->pagesize)
vma               644 mm/hugetlb.c   		return vma->vm_ops->pagesize(vma);
vma               655 mm/hugetlb.c   __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
vma               657 mm/hugetlb.c   	return vma_kernel_pagesize(vma);
vma               688 mm/hugetlb.c   static unsigned long get_vma_private_data(struct vm_area_struct *vma)
vma               690 mm/hugetlb.c   	return (unsigned long)vma->vm_private_data;
vma               693 mm/hugetlb.c   static void set_vma_private_data(struct vm_area_struct *vma,
vma               696 mm/hugetlb.c   	vma->vm_private_data = (void *)value;
vma               756 mm/hugetlb.c   static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
vma               758 mm/hugetlb.c   	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
vma               759 mm/hugetlb.c   	if (vma->vm_flags & VM_MAYSHARE) {
vma               760 mm/hugetlb.c   		struct address_space *mapping = vma->vm_file->f_mapping;
vma               766 mm/hugetlb.c   		return (struct resv_map *)(get_vma_private_data(vma) &
vma               771 mm/hugetlb.c   static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
vma               773 mm/hugetlb.c   	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
vma               774 mm/hugetlb.c   	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
vma               776 mm/hugetlb.c   	set_vma_private_data(vma, (get_vma_private_data(vma) &
vma               780 mm/hugetlb.c   static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
vma               782 mm/hugetlb.c   	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
vma               783 mm/hugetlb.c   	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
vma               785 mm/hugetlb.c   	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
vma               788 mm/hugetlb.c   static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
vma               790 mm/hugetlb.c   	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
vma               792 mm/hugetlb.c   	return (get_vma_private_data(vma) & flag) != 0;
vma               796 mm/hugetlb.c   void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
vma               798 mm/hugetlb.c   	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
vma               799 mm/hugetlb.c   	if (!(vma->vm_flags & VM_MAYSHARE))
vma               800 mm/hugetlb.c   		vma->vm_private_data = (void *)0;
vma               804 mm/hugetlb.c   static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
vma               806 mm/hugetlb.c   	if (vma->vm_flags & VM_NORESERVE) {
vma               816 mm/hugetlb.c   		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
vma               823 mm/hugetlb.c   	if (vma->vm_flags & VM_MAYSHARE) {
vma               841 mm/hugetlb.c   	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
vma               940 mm/hugetlb.c   				struct vm_area_struct *vma,
vma               955 mm/hugetlb.c   	if (!vma_has_reserves(vma, chg) &&
vma               964 mm/hugetlb.c   	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
vma               966 mm/hugetlb.c   	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
vma              1738 mm/hugetlb.c   		struct vm_area_struct *vma, unsigned long addr)
vma              1746 mm/hugetlb.c   	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
vma              1795 mm/hugetlb.c   struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
vma              1805 mm/hugetlb.c   	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
vma              1986 mm/hugetlb.c   				struct vm_area_struct *vma, unsigned long addr,
vma              1993 mm/hugetlb.c   	resv = vma_resv_map(vma);
vma              1997 mm/hugetlb.c   	idx = vma_hugecache_offset(h, vma, addr);
vma              2010 mm/hugetlb.c   		if (vma->vm_flags & VM_MAYSHARE)
vma              2021 mm/hugetlb.c   	if (vma->vm_flags & VM_MAYSHARE)
vma              2023 mm/hugetlb.c   	else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
vma              2047 mm/hugetlb.c   			struct vm_area_struct *vma, unsigned long addr)
vma              2049 mm/hugetlb.c   	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
vma              2053 mm/hugetlb.c   			struct vm_area_struct *vma, unsigned long addr)
vma              2055 mm/hugetlb.c   	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
vma              2059 mm/hugetlb.c   			struct vm_area_struct *vma, unsigned long addr)
vma              2061 mm/hugetlb.c   	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
vma              2065 mm/hugetlb.c   			struct vm_area_struct *vma, unsigned long addr)
vma              2067 mm/hugetlb.c   	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
vma              2082 mm/hugetlb.c   			struct vm_area_struct *vma, unsigned long address,
vma              2086 mm/hugetlb.c   		long rc = vma_needs_reservation(h, vma, address);
vma              2102 mm/hugetlb.c   			rc = vma_add_reservation(h, vma, address);
vma              2110 mm/hugetlb.c   			vma_end_reservation(h, vma, address);
vma              2114 mm/hugetlb.c   struct page *alloc_huge_page(struct vm_area_struct *vma,
vma              2117 mm/hugetlb.c   	struct hugepage_subpool *spool = subpool_vma(vma);
vma              2118 mm/hugetlb.c   	struct hstate *h = hstate_vma(vma);
vma              2131 mm/hugetlb.c   	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
vma              2145 mm/hugetlb.c   			vma_end_reservation(h, vma, addr);
vma              2171 mm/hugetlb.c   	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
vma              2174 mm/hugetlb.c   		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
vma              2177 mm/hugetlb.c   		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
vma              2190 mm/hugetlb.c   	map_commit = vma_commit_reservation(h, vma, addr);
vma              2213 mm/hugetlb.c   	vma_end_reservation(h, vma, addr);
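alloc_huge_page() above runs at fault time for hugetlb mappings: it charges the subpool, consumes or creates a reservation, then dequeues a page from the preallocated pool (falling back to alloc_buddy_huge_page_with_mpol()). The simplest userspace route into it is MAP_HUGETLB; a minimal sketch, assuming 2 MiB default huge pages and a non-empty nr_hugepages pool:

#include <stdio.h>
#include <sys/mman.h>

#define LEN (2UL << 20)	/* one default-sized huge page (assumption) */

int main(void)
{
	char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");	/* ENOMEM when no huge pages are reserved */
		return 1;
	}
	p[0] = 1;	/* faults through hugetlb_fault() into alloc_huge_page() */
	munmap(p, LEN);
	return 0;
}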
vma              3279 mm/hugetlb.c   static void hugetlb_vm_op_open(struct vm_area_struct *vma)
vma              3281 mm/hugetlb.c   	struct resv_map *resv = vma_resv_map(vma);
vma              3291 mm/hugetlb.c   	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
vma              3295 mm/hugetlb.c   static void hugetlb_vm_op_close(struct vm_area_struct *vma)
vma              3297 mm/hugetlb.c   	struct hstate *h = hstate_vma(vma);
vma              3298 mm/hugetlb.c   	struct resv_map *resv = vma_resv_map(vma);
vma              3299 mm/hugetlb.c   	struct hugepage_subpool *spool = subpool_vma(vma);
vma              3303 mm/hugetlb.c   	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
vma              3306 mm/hugetlb.c   	start = vma_hugecache_offset(h, vma, vma->vm_start);
vma              3307 mm/hugetlb.c   	end = vma_hugecache_offset(h, vma, vma->vm_end);
vma              3323 mm/hugetlb.c   static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
vma              3325 mm/hugetlb.c   	if (addr & ~(huge_page_mask(hstate_vma(vma))))
vma              3330 mm/hugetlb.c   static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
vma              3332 mm/hugetlb.c   	struct hstate *hstate = hstate_vma(vma);
vma              3364 mm/hugetlb.c   static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
vma              3371 mm/hugetlb.c   					 vma->vm_page_prot)));
vma              3374 mm/hugetlb.c   					   vma->vm_page_prot));
vma              3378 mm/hugetlb.c   	entry = arch_make_huge_pte(entry, vma, page, writable);
vma              3383 mm/hugetlb.c   static void set_huge_ptep_writable(struct vm_area_struct *vma,
vma              3389 mm/hugetlb.c   	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
vma              3390 mm/hugetlb.c   		update_mmu_cache(vma, address, ptep);
vma              3420 mm/hugetlb.c   			    struct vm_area_struct *vma)
vma              3426 mm/hugetlb.c   	struct hstate *h = hstate_vma(vma);
vma              3431 mm/hugetlb.c   	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
vma              3434 mm/hugetlb.c   		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src,
vma              3435 mm/hugetlb.c   					vma->vm_start,
vma              3436 mm/hugetlb.c   					vma->vm_end);
vma              3440 mm/hugetlb.c   	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
vma              3519 mm/hugetlb.c   void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
vma              3523 mm/hugetlb.c   	struct mm_struct *mm = vma->vm_mm;
vma              3529 mm/hugetlb.c   	struct hstate *h = hstate_vma(vma);
vma              3533 mm/hugetlb.c   	WARN_ON(!is_vm_hugetlb_page(vma));
vma              3542 mm/hugetlb.c   	tlb_start_vma(tlb, vma);
vma              3547 mm/hugetlb.c   	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
vma              3549 mm/hugetlb.c   	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
vma              3599 mm/hugetlb.c   			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
vma              3619 mm/hugetlb.c   	tlb_end_vma(tlb, vma);
vma              3623 mm/hugetlb.c   			  struct vm_area_struct *vma, unsigned long start,
vma              3626 mm/hugetlb.c   	__unmap_hugepage_range(tlb, vma, start, end, ref_page);
vma              3638 mm/hugetlb.c   	vma->vm_flags &= ~VM_MAYSHARE;
vma              3641 mm/hugetlb.c   void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
vma              3656 mm/hugetlb.c   	adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
vma              3658 mm/hugetlb.c   	mm = vma->vm_mm;
vma              3661 mm/hugetlb.c   	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
vma              3671 mm/hugetlb.c   static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
vma              3674 mm/hugetlb.c   	struct hstate *h = hstate_vma(vma);
vma              3684 mm/hugetlb.c   	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
vma              3685 mm/hugetlb.c   			vma->vm_pgoff;
vma              3686 mm/hugetlb.c   	mapping = vma->vm_file->f_mapping;
vma              3696 mm/hugetlb.c   		if (iter_vma == vma)
vma              3727 mm/hugetlb.c   static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
vma              3732 mm/hugetlb.c   	struct hstate *h = hstate_vma(vma);
vma              3746 mm/hugetlb.c   		page_move_anon_rmap(old_page, vma);
vma              3747 mm/hugetlb.c   		set_huge_ptep_writable(vma, haddr, ptep);
vma              3760 mm/hugetlb.c   	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
vma              3771 mm/hugetlb.c   	new_page = alloc_huge_page(vma, haddr, outside_reserve);
vma              3784 mm/hugetlb.c   			unmap_ref_private(mm, vma, old_page, haddr);
vma              3806 mm/hugetlb.c   	if (unlikely(anon_vma_prepare(vma))) {
vma              3811 mm/hugetlb.c   	copy_user_huge_page(new_page, old_page, address, vma,
vma              3815 mm/hugetlb.c   	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
vma              3829 mm/hugetlb.c   		huge_ptep_clear_flush(vma, haddr, ptep);
vma              3832 mm/hugetlb.c   				make_huge_pte(vma, new_page, 1));
vma              3834 mm/hugetlb.c   		hugepage_add_new_anon_rmap(new_page, vma, haddr);
vma              3842 mm/hugetlb.c   	restore_reserve_on_error(h, vma, haddr, new_page);
vma              3853 mm/hugetlb.c   			struct vm_area_struct *vma, unsigned long address)
vma              3858 mm/hugetlb.c   	mapping = vma->vm_file->f_mapping;
vma              3859 mm/hugetlb.c   	idx = vma_hugecache_offset(h, vma, address);
vma              3869 mm/hugetlb.c   			struct vm_area_struct *vma, unsigned long address)
vma              3875 mm/hugetlb.c   	mapping = vma->vm_file->f_mapping;
vma              3876 mm/hugetlb.c   	idx = vma_hugecache_offset(h, vma, address);
vma              3908 mm/hugetlb.c   			struct vm_area_struct *vma,
vma              3912 mm/hugetlb.c   	struct hstate *h = hstate_vma(vma);
vma              3927 mm/hugetlb.c   	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
vma              3947 mm/hugetlb.c   		if (userfaultfd_missing(vma)) {
vma              3950 mm/hugetlb.c   				.vma = vma,
vma              3974 mm/hugetlb.c   		page = alloc_huge_page(vma, haddr, 0);
vma              4002 mm/hugetlb.c   		if (vma->vm_flags & VM_MAYSHARE) {
vma              4012 mm/hugetlb.c   			if (unlikely(anon_vma_prepare(vma))) {
vma              4037 mm/hugetlb.c   	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
vma              4038 mm/hugetlb.c   		if (vma_needs_reservation(h, vma, haddr) < 0) {
vma              4043 mm/hugetlb.c   		vma_end_reservation(h, vma, haddr);
vma              4057 mm/hugetlb.c   		hugepage_add_new_anon_rmap(page, vma, haddr);
vma              4060 mm/hugetlb.c   	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
vma              4061 mm/hugetlb.c   				&& (vma->vm_flags & VM_SHARED)));
vma              4065 mm/hugetlb.c   	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
vma              4067 mm/hugetlb.c   		ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
vma              4088 mm/hugetlb.c   	restore_reserve_on_error(h, vma, haddr, page);
vma              4119 mm/hugetlb.c   vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
vma              4129 mm/hugetlb.c   	struct hstate *h = hstate_vma(vma);
vma              4138 mm/hugetlb.c   			migration_entry_wait_huge(vma, mm, ptep);
vma              4149 mm/hugetlb.c   	mapping = vma->vm_file->f_mapping;
vma              4150 mm/hugetlb.c   	idx = vma_hugecache_offset(h, vma, haddr);
vma              4162 mm/hugetlb.c   		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
vma              4187 mm/hugetlb.c   		if (vma_needs_reservation(h, vma, haddr) < 0) {
vma              4192 mm/hugetlb.c   		vma_end_reservation(h, vma, haddr);
vma              4194 mm/hugetlb.c   		if (!(vma->vm_flags & VM_MAYSHARE))
vma              4196 mm/hugetlb.c   								vma, haddr);
vma              4221 mm/hugetlb.c   			ret = hugetlb_cow(mm, vma, address, ptep,
vma              4228 mm/hugetlb.c   	if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
vma              4230 mm/hugetlb.c   		update_mmu_cache(vma, haddr, ptep);
vma              4387 mm/hugetlb.c   long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
vma              4395 mm/hugetlb.c   	struct hstate *h = hstate_vma(vma);
vma              4398 mm/hugetlb.c   	while (vaddr < vma->vm_end && remainder) {
vma              4434 mm/hugetlb.c   		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
vma              4471 mm/hugetlb.c   			ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
vma              4518 mm/hugetlb.c   			vmas[i] = vma;
vma              4524 mm/hugetlb.c   		if (vaddr < vma->vm_end && remainder &&
vma              4550 mm/hugetlb.c   #define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
vma              4553 mm/hugetlb.c   unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
vma              4556 mm/hugetlb.c   	struct mm_struct *mm = vma->vm_mm;
vma              4560 mm/hugetlb.c   	struct hstate *h = hstate_vma(vma);
vma              4571 mm/hugetlb.c   				0, vma, mm, start, end);
vma              4572 mm/hugetlb.c   	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
vma              4575 mm/hugetlb.c   	flush_cache_range(vma, range.start, range.end);
vma              4578 mm/hugetlb.c   	i_mmap_lock_write(vma->vm_file->f_mapping);
vma              4614 mm/hugetlb.c   			old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
vma              4616 mm/hugetlb.c   			pte = arch_make_huge_pte(pte, vma, NULL, 0);
vma              4617 mm/hugetlb.c   			huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
vma              4630 mm/hugetlb.c   		flush_hugetlb_tlb_range(vma, range.start, range.end);
vma              4632 mm/hugetlb.c   		flush_hugetlb_tlb_range(vma, start, end);
vma              4639 mm/hugetlb.c   	i_mmap_unlock_write(vma->vm_file->f_mapping);
vma              4647 mm/hugetlb.c   					struct vm_area_struct *vma,
vma              4676 mm/hugetlb.c   	if (!vma || vma->vm_flags & VM_MAYSHARE) {
vma              4693 mm/hugetlb.c   		set_vma_resv_map(vma, resv_map);
vma              4694 mm/hugetlb.c   		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
vma              4735 mm/hugetlb.c   	if (!vma || vma->vm_flags & VM_MAYSHARE) {
vma              4755 mm/hugetlb.c   	if (!vma || vma->vm_flags & VM_MAYSHARE)
vma              4759 mm/hugetlb.c   	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
vma              4804 mm/hugetlb.c   				struct vm_area_struct *vma,
vma              4813 mm/hugetlb.c   	unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
vma              4828 mm/hugetlb.c   static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
vma              4836 mm/hugetlb.c   	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
vma              4846 mm/hugetlb.c   void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
vma              4851 mm/hugetlb.c   	if (!(vma->vm_flags & VM_MAYSHARE))
vma              4861 mm/hugetlb.c   		if (range_in_vma(vma, a_start, a_end)) {
vma              4881 mm/hugetlb.c   	struct vm_area_struct *vma = find_vma(mm, addr);
vma              4882 mm/hugetlb.c   	struct address_space *mapping = vma->vm_file->f_mapping;
vma              4883 mm/hugetlb.c   	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
vma              4884 mm/hugetlb.c   			vma->vm_pgoff;
vma              4891 mm/hugetlb.c   	if (!vma_shareable(vma, addr))
vma              4896 mm/hugetlb.c   		if (svma == vma)
vma              4899 mm/hugetlb.c   		saddr = page_table_shareable(svma, vma, addr, idx);
vma              4913 mm/hugetlb.c   	ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
vma              4968 mm/hugetlb.c   void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
vma              5062 mm/hugetlb.c   follow_huge_pd(struct vm_area_struct *vma,
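The mm/hugetlb.c references above repeatedly derive the page size from the VMA via hstate_vma() and then step through the mapping at huge-page stride, as copy_hugetlb_page_range() and __unmap_hugepage_range() do. A minimal sketch of that walk, assuming kernel context; walk_hugetlb_vma() is an illustrative name, and the huge_pte_lock() locking shown in the listing is elided:

	#include <linux/mm.h>
	#include <linux/hugetlb.h>

	/* Illustrative only: visit each huge-page slot of a hugetlb VMA. */
	static void walk_hugetlb_vma(struct vm_area_struct *vma)
	{
		struct hstate *h = hstate_vma(vma);	/* per-VMA huge page size */
		unsigned long sz = huge_page_size(h);
		unsigned long addr;

		WARN_ON(!is_vm_hugetlb_page(vma));

		for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
			pte_t *ptep = huge_pte_offset(vma->vm_mm, addr, sz);

			if (!ptep)
				continue;	/* nothing mapped at this slot */
			/* ... inspect *ptep under huge_pte_lock(h, vma->vm_mm, ptep) ... */
		}
	}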
vma                42 mm/internal.h  static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
vma                44 mm/internal.h  	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
vma                48 mm/internal.h  			     struct vm_area_struct *vma,
vma               292 mm/internal.h  void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
vma               296 mm/internal.h  extern long populate_vma_page_range(struct vm_area_struct *vma,
vma               298 mm/internal.h  extern void munlock_vma_pages_range(struct vm_area_struct *vma,
vma               300 mm/internal.h  static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
vma               302 mm/internal.h  	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
vma               339 mm/internal.h  extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
vma               345 mm/internal.h  __vma_address(struct page *page, struct vm_area_struct *vma)
vma               348 mm/internal.h  	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
vma               352 mm/internal.h  vma_address(struct page *page, struct vm_area_struct *vma)
vma               356 mm/internal.h  	start = __vma_address(page, vma);
vma               360 mm/internal.h  	VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);
vma               362 mm/internal.h  	return max(start, vma->vm_start);
vma               380 mm/internal.h  		fpin = get_file(vmf->vma->vm_file);
vma               381 mm/internal.h  		up_read(&vmf->vma->vm_mm->mmap_sem);
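__vma_address() in mm/internal.h above is the standard pgoff-to-user-address translation. Spelled out as a sketch (vma_addr_of_pgoff() is an illustrative name): a page at file offset pgoff lands at vm_start plus its page distance from the VMA's own starting offset vm_pgoff.

	static inline unsigned long
	vma_addr_of_pgoff(struct vm_area_struct *vma, pgoff_t pgoff)
	{
		/* same arithmetic as __vma_address() above */
		return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	}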
vma                63 mm/interval_tree.c 	return vma_start_pgoff(avc->vma);
vma                68 mm/interval_tree.c 	return vma_last_pgoff(avc->vma);
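The two mm/interval_tree.c callbacks above key the anon interval tree by the VMA's pgoff range. A sketch of that convention, assuming the tree indexes the closed page range from vm_pgoff through vm_pgoff + vma_pages() - 1 (the same bounds unmap_mapping_range computes later in this listing); the function names here are illustrative:

	static inline pgoff_t vma_first_pgoff(struct vm_area_struct *vma)
	{
		return vma->vm_pgoff;			/* first file page covered */
	}

	static inline pgoff_t vma_final_pgoff(struct vm_area_struct *vma)
	{
		return vma->vm_pgoff + vma_pages(vma) - 1;	/* last, inclusive */
	}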
vma               313 mm/khugepaged.c int hugepage_madvise(struct vm_area_struct *vma,
vma               324 mm/khugepaged.c 		if (mm_has_pgste(vma->vm_mm))
vma               335 mm/khugepaged.c 				khugepaged_enter_vma_merge(vma, *vm_flags))
vma               407 mm/khugepaged.c static bool hugepage_vma_check(struct vm_area_struct *vma,
vma               412 mm/khugepaged.c 	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
vma               415 mm/khugepaged.c 	if (shmem_file(vma->vm_file) ||
vma               417 mm/khugepaged.c 	     vma->vm_file &&
vma               421 mm/khugepaged.c 		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
vma               424 mm/khugepaged.c 	if (!vma->anon_vma || vma->vm_ops)
vma               426 mm/khugepaged.c 	if (is_vma_temporary_stack(vma))
vma               464 mm/khugepaged.c int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
vma               474 mm/khugepaged.c 	if (!hugepage_vma_check(vma, vm_flags))
vma               477 mm/khugepaged.c 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
vma               478 mm/khugepaged.c 	hend = vma->vm_end & HPAGE_PMD_MASK;
vma               480 mm/khugepaged.c 		return khugepaged_enter(vma, vm_flags);
vma               532 mm/khugepaged.c static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
vma               546 mm/khugepaged.c 			if (!userfaultfd_armed(vma) &&
vma               558 mm/khugepaged.c 		page = vm_normal_page(vma, address, pteval);
vma               625 mm/khugepaged.c 		    mmu_notifier_test_young(vma->vm_mm, address))
vma               647 mm/khugepaged.c 				      struct vm_area_struct *vma,
vma               659 mm/khugepaged.c 			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
vma               669 mm/khugepaged.c 				pte_clear(vma->vm_mm, address, _pte);
vma               674 mm/khugepaged.c 			copy_user_highpage(page, src_page, address, vma);
vma               687 mm/khugepaged.c 			pte_clear(vma->vm_mm, address, _pte);
vma               863 mm/khugepaged.c 	struct vm_area_struct *vma;
vma               869 mm/khugepaged.c 	*vmap = vma = find_vma(mm, address);
vma               870 mm/khugepaged.c 	if (!vma)
vma               873 mm/khugepaged.c 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
vma               874 mm/khugepaged.c 	hend = vma->vm_end & HPAGE_PMD_MASK;
vma               877 mm/khugepaged.c 	if (!hugepage_vma_check(vma, vma->vm_flags))
vma               891 mm/khugepaged.c 					struct vm_area_struct *vma,
vma               898 mm/khugepaged.c 		.vma = vma,
vma               902 mm/khugepaged.c 		.pgoff = linear_page_index(vma, address),
vma               922 mm/khugepaged.c 			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
vma               958 mm/khugepaged.c 	struct vm_area_struct *vma;
vma               986 mm/khugepaged.c 	result = hugepage_vma_revalidate(mm, address, &vma);
vma              1006 mm/khugepaged.c 	if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
vma              1022 mm/khugepaged.c 	result = hugepage_vma_revalidate(mm, address, &vma);
vma              1029 mm/khugepaged.c 	anon_vma_lock_write(vma->anon_vma);
vma              1045 mm/khugepaged.c 	_pmd = pmdp_collapse_flush(vma, address, pmd);
vma              1050 mm/khugepaged.c 	isolated = __collapse_huge_page_isolate(vma, address, pte);
vma              1064 mm/khugepaged.c 		anon_vma_unlock_write(vma->anon_vma);
vma              1073 mm/khugepaged.c 	anon_vma_unlock_write(vma->anon_vma);
vma              1075 mm/khugepaged.c 	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
vma              1080 mm/khugepaged.c 	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
vma              1081 mm/khugepaged.c 	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
vma              1092 mm/khugepaged.c 	page_add_new_anon_rmap(new_page, vma, address, true);
vma              1095 mm/khugepaged.c 	lru_cache_add_active_or_unevictable(new_page, vma);
vma              1098 mm/khugepaged.c 	update_mmu_cache_pmd(vma, address, pmd);
vma              1116 mm/khugepaged.c 			       struct vm_area_struct *vma,
vma              1151 mm/khugepaged.c 			if (!userfaultfd_armed(vma) &&
vma              1166 mm/khugepaged.c 		page = vm_normal_page(vma, _address, pteval);
vma              1214 mm/khugepaged.c 		    mmu_notifier_test_young(vma->vm_mm, address))
vma              1293 mm/khugepaged.c 	struct vm_area_struct *vma = find_vma(mm, haddr);
vma              1301 mm/khugepaged.c 	if (!vma || !vma->vm_file ||
vma              1302 mm/khugepaged.c 	    vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
vma              1311 mm/khugepaged.c 	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
vma              1333 mm/khugepaged.c 		page = vm_normal_page(vma, addr, *pte);
vma              1347 mm/khugepaged.c 			if (WARN_ON(hpage->mapping != vma->vm_file->f_mapping))
vma              1370 mm/khugepaged.c 		page = vm_normal_page(vma, addr, *pte);
vma              1379 mm/khugepaged.c 		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
vma              1383 mm/khugepaged.c 	ptl = pmd_lock(vma->vm_mm, pmd);
vma              1384 mm/khugepaged.c 	_pmd = pmdp_collapse_flush(vma, addr, pmd);
vma              1419 mm/khugepaged.c 	struct vm_area_struct *vma;
vma              1424 mm/khugepaged.c 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
vma              1441 mm/khugepaged.c 		if (vma->anon_vma)
vma              1443 mm/khugepaged.c 		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
vma              1446 mm/khugepaged.c 		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
vma              1448 mm/khugepaged.c 		pmd = mm_find_pmd(vma->vm_mm, addr);
vma              1458 mm/khugepaged.c 		if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
vma              1459 mm/khugepaged.c 			spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
vma              1461 mm/khugepaged.c 			_pmd = pmdp_collapse_flush(vma, addr, pmd);
vma              1463 mm/khugepaged.c 			up_write(&vma->vm_mm->mmap_sem);
vma              1464 mm/khugepaged.c 			mm_dec_nr_ptes(vma->vm_mm);
vma              1465 mm/khugepaged.c 			pte_free(vma->vm_mm, pmd_pgtable(_pmd));
vma              1468 mm/khugepaged.c 			khugepaged_add_pte_mapped_thp(vma->vm_mm, addr);
vma              1910 mm/khugepaged.c 	struct vm_area_struct *vma;
vma              1932 mm/khugepaged.c 	vma = NULL;
vma              1936 mm/khugepaged.c 		vma = find_vma(mm, khugepaged_scan.address);
vma              1939 mm/khugepaged.c 	for (; vma; vma = vma->vm_next) {
vma              1947 mm/khugepaged.c 		if (!hugepage_vma_check(vma, vma->vm_flags)) {
vma              1952 mm/khugepaged.c 		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
vma              1953 mm/khugepaged.c 		hend = vma->vm_end & HPAGE_PMD_MASK;
vma              1971 mm/khugepaged.c 			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
vma              1973 mm/khugepaged.c 				pgoff_t pgoff = linear_page_index(vma,
vma              1976 mm/khugepaged.c 				if (shmem_file(vma->vm_file)
vma              1977 mm/khugepaged.c 				    && !shmem_huge_enabled(vma))
vma              1979 mm/khugepaged.c 				file = get_file(vma->vm_file);
vma              1985 mm/khugepaged.c 				ret = khugepaged_scan_pmd(mm, vma,
vma              2009 mm/khugepaged.c 	if (khugepaged_test_exit(mm) || !vma) {
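Several mm/khugepaged.c sites above compute the same hstart/hend pair before deciding whether a VMA can host a collapsed huge page: round vm_start up and vm_end down to a PMD boundary, then require the address to fall in the aligned middle. A minimal sketch of that test; range_fits_huge_page() is an illustrative name, and address is assumed PMD-aligned as in the revalidate path:

	static bool range_fits_huge_page(struct vm_area_struct *vma,
					 unsigned long address)
	{
		unsigned long hstart, hend;

		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;

		/* only the PMD-aligned middle of the VMA is eligible */
		return hstart < hend && address >= hstart && address < hend;
	}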
vma               470 mm/ksm.c       static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
vma               477 mm/ksm.c       		page = follow_page(vma, addr,
vma               482 mm/ksm.c       			ret = handle_mm_fault(vma, addr,
vma               522 mm/ksm.c       	struct vm_area_struct *vma;
vma               525 mm/ksm.c       	vma = find_vma(mm, addr);
vma               526 mm/ksm.c       	if (!vma || vma->vm_start > addr)
vma               528 mm/ksm.c       	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
vma               530 mm/ksm.c       	return vma;
vma               537 mm/ksm.c       	struct vm_area_struct *vma;
vma               546 mm/ksm.c       	vma = find_mergeable_vma(mm, addr);
vma               547 mm/ksm.c       	if (vma)
vma               548 mm/ksm.c       		break_ksm(vma, addr);
vma               556 mm/ksm.c       	struct vm_area_struct *vma;
vma               560 mm/ksm.c       	vma = find_mergeable_vma(mm, addr);
vma               561 mm/ksm.c       	if (!vma)
vma               564 mm/ksm.c       	page = follow_page(vma, addr, FOLL_GET);
vma               568 mm/ksm.c       		flush_anon_page(vma, page, addr);
vma               843 mm/ksm.c       static int unmerge_ksm_pages(struct vm_area_struct *vma,
vma               850 mm/ksm.c       		if (ksm_test_exit(vma->vm_mm))
vma               855 mm/ksm.c       			err = break_ksm(vma, addr);
vma               968 mm/ksm.c       	struct vm_area_struct *vma;
vma               980 mm/ksm.c       		for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma               983 mm/ksm.c       			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
vma               985 mm/ksm.c       			err = unmerge_ksm_pages(vma,
vma               986 mm/ksm.c       						vma->vm_start, vma->vm_end);
vma              1032 mm/ksm.c       static int write_protect_page(struct vm_area_struct *vma, struct page *page,
vma              1035 mm/ksm.c       	struct mm_struct *mm = vma->vm_mm;
vma              1038 mm/ksm.c       		.vma = vma,
vma              1044 mm/ksm.c       	pvmw.address = page_address_in_vma(page, vma);
vma              1050 mm/ksm.c       	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
vma              1066 mm/ksm.c       		flush_cache_page(vma, pvmw.address, page_to_pfn(page));
vma              1081 mm/ksm.c       		entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
vma              1119 mm/ksm.c       static int replace_page(struct vm_area_struct *vma, struct page *page,
vma              1122 mm/ksm.c       	struct mm_struct *mm = vma->vm_mm;
vma              1131 mm/ksm.c       	addr = page_address_in_vma(page, vma);
vma              1139 mm/ksm.c       	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
vma              1155 mm/ksm.c       		page_add_anon_rmap(kpage, vma, addr, false);
vma              1156 mm/ksm.c       		newpte = mk_pte(kpage, vma->vm_page_prot);
vma              1159 mm/ksm.c       					       vma->vm_page_prot));
vma              1169 mm/ksm.c       	flush_cache_page(vma, addr, pte_pfn(*ptep));
vma              1176 mm/ksm.c       	ptep_clear_flush(vma, addr, ptep);
vma              1201 mm/ksm.c       static int try_to_merge_one_page(struct vm_area_struct *vma,
vma              1234 mm/ksm.c       	if (write_protect_page(vma, page, &orig_pte) == 0) {
vma              1251 mm/ksm.c       			err = replace_page(vma, page, kpage, orig_pte);
vma              1254 mm/ksm.c       	if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
vma              1280 mm/ksm.c       	struct vm_area_struct *vma;
vma              1284 mm/ksm.c       	vma = find_mergeable_vma(mm, rmap_item->address);
vma              1285 mm/ksm.c       	if (!vma)
vma              1288 mm/ksm.c       	err = try_to_merge_one_page(vma, page, kpage);
vma              1296 mm/ksm.c       	rmap_item->anon_vma = vma->anon_vma;
vma              1297 mm/ksm.c       	get_anon_vma(vma->anon_vma);
vma              2111 mm/ksm.c       		struct vm_area_struct *vma;
vma              2114 mm/ksm.c       		vma = find_mergeable_vma(mm, rmap_item->address);
vma              2115 mm/ksm.c       		if (vma) {
vma              2116 mm/ksm.c       			err = try_to_merge_one_page(vma, page,
vma              2228 mm/ksm.c       	struct vm_area_struct *vma;
vma              2290 mm/ksm.c       		vma = NULL;
vma              2292 mm/ksm.c       		vma = find_vma(mm, ksm_scan.address);
vma              2294 mm/ksm.c       	for (; vma; vma = vma->vm_next) {
vma              2295 mm/ksm.c       		if (!(vma->vm_flags & VM_MERGEABLE))
vma              2297 mm/ksm.c       		if (ksm_scan.address < vma->vm_start)
vma              2298 mm/ksm.c       			ksm_scan.address = vma->vm_start;
vma              2299 mm/ksm.c       		if (!vma->anon_vma)
vma              2300 mm/ksm.c       			ksm_scan.address = vma->vm_end;
vma              2302 mm/ksm.c       		while (ksm_scan.address < vma->vm_end) {
vma              2305 mm/ksm.c       			*page = follow_page(vma, ksm_scan.address, FOLL_GET);
vma              2312 mm/ksm.c       				flush_anon_page(vma, *page, ksm_scan.address);
vma              2436 mm/ksm.c       int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
vma              2439 mm/ksm.c       	struct mm_struct *mm = vma->vm_mm;
vma              2452 mm/ksm.c       		if (vma_is_dax(vma))
vma              2477 mm/ksm.c       		if (vma->anon_vma) {
vma              2478 mm/ksm.c       			err = unmerge_ksm_pages(vma, start, end);
vma              2568 mm/ksm.c       			struct vm_area_struct *vma, unsigned long address)
vma              2579 mm/ksm.c       	} else if (anon_vma->root == vma->anon_vma->root &&
vma              2580 mm/ksm.c       		 page->index == linear_page_index(vma, address)) {
vma              2586 mm/ksm.c       	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
vma              2588 mm/ksm.c       		copy_user_highpage(new_page, page, address, vma);
vma              2619 mm/ksm.c       		struct vm_area_struct *vma;
vma              2628 mm/ksm.c       			vma = vmac->vma;
vma              2633 mm/ksm.c       			if (addr < vma->vm_start || addr >= vma->vm_end)
vma              2641 mm/ksm.c       			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
vma              2644 mm/ksm.c       			if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
vma              2647 mm/ksm.c       			if (!rwc->rmap_one(page, vma, addr, rwc->arg)) {
vma              2663 mm/ksm.c       		    struct vm_area_struct *vma,
vma              2681 mm/ksm.c       	page_move_anon_rmap(page, vma);
vma              2682 mm/ksm.c       	page->index = linear_page_index(vma, address);
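find_mergeable_vma() in mm/ksm.c above gates every KSM operation on the same three checks: the address must be mapped, the VMA must have been opted in with MADV_MERGEABLE, and it must already have an anon_vma. Restated as a sketch with an illustrative name:

	static struct vm_area_struct *mergeable_vma(struct mm_struct *mm,
						    unsigned long addr)
	{
		struct vm_area_struct *vma = find_vma(mm, addr);

		if (!vma || vma->vm_start > addr)
			return NULL;		/* addr not mapped at all */
		if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
			return NULL;		/* not a KSM candidate */
		return vma;
	}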
vma                65 mm/madvise.c   static long madvise_behavior(struct vm_area_struct *vma,
vma                69 mm/madvise.c   	struct mm_struct *mm = vma->vm_mm;
vma                72 mm/madvise.c   	unsigned long new_flags = vma->vm_flags;
vma                88 mm/madvise.c   		if (vma->vm_flags & VM_IO) {
vma                96 mm/madvise.c   		if (vma->vm_file || vma->vm_flags & VM_SHARED) {
vma               109 mm/madvise.c   		if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
vma               117 mm/madvise.c   		error = ksm_madvise(vma, start, end, behavior, &new_flags);
vma               123 mm/madvise.c   		error = hugepage_madvise(vma, &new_flags, behavior);
vma               129 mm/madvise.c   	if (new_flags == vma->vm_flags) {
vma               130 mm/madvise.c   		*prev = vma;
vma               134 mm/madvise.c   	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
vma               135 mm/madvise.c   	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
vma               136 mm/madvise.c   			  vma->vm_file, pgoff, vma_policy(vma),
vma               137 mm/madvise.c   			  vma->vm_userfaultfd_ctx);
vma               139 mm/madvise.c   		vma = *prev;
vma               143 mm/madvise.c   	*prev = vma;
vma               145 mm/madvise.c   	if (start != vma->vm_start) {
vma               150 mm/madvise.c   		error = __split_vma(mm, vma, start, 1);
vma               155 mm/madvise.c   	if (end != vma->vm_end) {
vma               160 mm/madvise.c   		error = __split_vma(mm, vma, end, 0);
vma               169 mm/madvise.c   	vma->vm_flags = new_flags;
vma               187 mm/madvise.c   	struct vm_area_struct *vma = walk->private;
vma               199 mm/madvise.c   		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
vma               210 mm/madvise.c   							vma, index, false);
vma               222 mm/madvise.c   static void force_shm_swapin_readahead(struct vm_area_struct *vma,
vma               231 mm/madvise.c   		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
vma               253 mm/madvise.c   static long madvise_willneed(struct vm_area_struct *vma,
vma               257 mm/madvise.c   	struct file *file = vma->vm_file;
vma               260 mm/madvise.c   	*prev = vma;
vma               263 mm/madvise.c   		walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
vma               269 mm/madvise.c   		force_shm_swapin_readahead(vma, start, end,
vma               292 mm/madvise.c   	offset = (loff_t)(start - vma->vm_start)
vma               293 mm/madvise.c   			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
vma               308 mm/madvise.c   	struct vm_area_struct *vma = walk->vma;
vma               323 mm/madvise.c   		ptl = pmd_trans_huge_lock(pmd, vma);
vma               358 mm/madvise.c   			pmdp_invalidate(vma, addr, pmd);
vma               388 mm/madvise.c   	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
vma               400 mm/madvise.c   		page = vm_normal_page(vma, addr, ptent);
vma               478 mm/madvise.c   			     struct vm_area_struct *vma,
vma               486 mm/madvise.c   	tlb_start_vma(tlb, vma);
vma               487 mm/madvise.c   	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
vma               488 mm/madvise.c   	tlb_end_vma(tlb, vma);
vma               491 mm/madvise.c   static long madvise_cold(struct vm_area_struct *vma,
vma               495 mm/madvise.c   	struct mm_struct *mm = vma->vm_mm;
vma               498 mm/madvise.c   	*prev = vma;
vma               499 mm/madvise.c   	if (!can_madv_lru_vma(vma))
vma               504 mm/madvise.c   	madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
vma               511 mm/madvise.c   			     struct vm_area_struct *vma,
vma               519 mm/madvise.c   	tlb_start_vma(tlb, vma);
vma               520 mm/madvise.c   	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
vma               521 mm/madvise.c   	tlb_end_vma(tlb, vma);
vma               524 mm/madvise.c   static inline bool can_do_pageout(struct vm_area_struct *vma)
vma               526 mm/madvise.c   	if (vma_is_anonymous(vma))
vma               528 mm/madvise.c   	if (!vma->vm_file)
vma               536 mm/madvise.c   	return inode_owner_or_capable(file_inode(vma->vm_file)) ||
vma               537 mm/madvise.c   		inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0;
vma               540 mm/madvise.c   static long madvise_pageout(struct vm_area_struct *vma,
vma               544 mm/madvise.c   	struct mm_struct *mm = vma->vm_mm;
vma               547 mm/madvise.c   	*prev = vma;
vma               548 mm/madvise.c   	if (!can_madv_lru_vma(vma))
vma               551 mm/madvise.c   	if (!can_do_pageout(vma))
vma               556 mm/madvise.c   	madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
vma               568 mm/madvise.c   	struct vm_area_struct *vma = walk->vma;
vma               577 mm/madvise.c   		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
vma               609 mm/madvise.c   		page = vm_normal_page(vma, addr, ptent);
vma               699 mm/madvise.c   static int madvise_free_single_vma(struct vm_area_struct *vma,
vma               702 mm/madvise.c   	struct mm_struct *mm = vma->vm_mm;
vma               707 mm/madvise.c   	if (!vma_is_anonymous(vma))
vma               710 mm/madvise.c   	range.start = max(vma->vm_start, start_addr);
vma               711 mm/madvise.c   	if (range.start >= vma->vm_end)
vma               713 mm/madvise.c   	range.end = min(vma->vm_end, end_addr);
vma               714 mm/madvise.c   	if (range.end <= vma->vm_start)
vma               716 mm/madvise.c   	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
vma               724 mm/madvise.c   	tlb_start_vma(&tlb, vma);
vma               725 mm/madvise.c   	walk_page_range(vma->vm_mm, range.start, range.end,
vma               727 mm/madvise.c   	tlb_end_vma(&tlb, vma);
vma               753 mm/madvise.c   static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
vma               756 mm/madvise.c   	zap_page_range(vma, start, end - start);
vma               760 mm/madvise.c   static long madvise_dontneed_free(struct vm_area_struct *vma,
vma               765 mm/madvise.c   	*prev = vma;
vma               766 mm/madvise.c   	if (!can_madv_lru_vma(vma))
vma               769 mm/madvise.c   	if (!userfaultfd_remove(vma, start, end)) {
vma               773 mm/madvise.c   		vma = find_vma(current->mm, start);
vma               774 mm/madvise.c   		if (!vma)
vma               776 mm/madvise.c   		if (start < vma->vm_start) {
vma               788 mm/madvise.c   		if (!can_madv_lru_vma(vma))
vma               790 mm/madvise.c   		if (end > vma->vm_end) {
vma               803 mm/madvise.c   			end = vma->vm_end;
vma               809 mm/madvise.c   		return madvise_dontneed_single_vma(vma, start, end);
vma               811 mm/madvise.c   		return madvise_free_single_vma(vma, start, end);
vma               820 mm/madvise.c   static long madvise_remove(struct vm_area_struct *vma,
vma               830 mm/madvise.c   	if (vma->vm_flags & VM_LOCKED)
vma               833 mm/madvise.c   	f = vma->vm_file;
vma               839 mm/madvise.c   	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
vma               842 mm/madvise.c   	offset = (loff_t)(start - vma->vm_start)
vma               843 mm/madvise.c   			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
vma               852 mm/madvise.c   	if (userfaultfd_remove(vma, start, end)) {
vma               934 mm/madvise.c   madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
vma               939 mm/madvise.c   		return madvise_remove(vma, prev, start, end);
vma               941 mm/madvise.c   		return madvise_willneed(vma, prev, start, end);
vma               943 mm/madvise.c   		return madvise_cold(vma, prev, start, end);
vma               945 mm/madvise.c   		return madvise_pageout(vma, prev, start, end);
vma               948 mm/madvise.c   		return madvise_dontneed_free(vma, prev, start, end, behavior);
vma               950 mm/madvise.c   		return madvise_behavior(vma, prev, start, end, behavior);
vma              1056 mm/madvise.c   	struct vm_area_struct *vma, *prev;
vma              1102 mm/madvise.c   	vma = find_vma_prev(current->mm, start, &prev);
vma              1103 mm/madvise.c   	if (vma && start > vma->vm_start)
vma              1104 mm/madvise.c   		prev = vma;
vma              1110 mm/madvise.c   		if (!vma)
vma              1114 mm/madvise.c   		if (start < vma->vm_start) {
vma              1116 mm/madvise.c   			start = vma->vm_start;
vma              1122 mm/madvise.c   		tmp = vma->vm_end;
vma              1127 mm/madvise.c   		error = madvise_vma(vma, &prev, start, tmp, behavior);
vma              1137 mm/madvise.c   			vma = prev->vm_next;
vma              1139 mm/madvise.c   			vma = find_vma(current->mm, start);
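madvise_remove() and the willneed path in mm/madvise.c above both convert a user address range into a byte offset within the backing file with the same arithmetic; as a sketch (vma_file_offset() is an illustrative name):

	static loff_t vma_file_offset(struct vm_area_struct *vma,
				      unsigned long start)
	{
		/* distance into the VMA, plus the VMA's own file offset */
		return (loff_t)(start - vma->vm_start) +
		       ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	}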
vma              5372 mm/memcontrol.c static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
vma              5375 mm/memcontrol.c 	struct page *page = vm_normal_page(vma, addr, ptent);
vma              5393 mm/memcontrol.c static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
vma              5429 mm/memcontrol.c static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
vma              5436 mm/memcontrol.c static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
vma              5443 mm/memcontrol.c 	if (!vma->vm_file) /* anonymous vma */
vma              5448 mm/memcontrol.c 	mapping = vma->vm_file->f_mapping;
vma              5449 mm/memcontrol.c 	pgoff = linear_page_index(vma, addr);
vma              5594 mm/memcontrol.c static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
vma              5602 mm/memcontrol.c 		page = mc_handle_present_pte(vma, addr, ptent);
vma              5604 mm/memcontrol.c 		page = mc_handle_swap_pte(vma, ptent, &ent);
vma              5606 mm/memcontrol.c 		page = mc_handle_file_pte(vma, addr, ptent, &ent);
vma              5645 mm/memcontrol.c static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
vma              5670 mm/memcontrol.c static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
vma              5681 mm/memcontrol.c 	struct vm_area_struct *vma = walk->vma;
vma              5685 mm/memcontrol.c 	ptl = pmd_trans_huge_lock(pmd, vma);
vma              5692 mm/memcontrol.c 		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
vma              5700 mm/memcontrol.c 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
vma              5702 mm/memcontrol.c 		if (get_mctgt_type(vma, addr, *pte, NULL))
vma              5881 mm/memcontrol.c 	struct vm_area_struct *vma = walk->vma;
vma              5888 mm/memcontrol.c 	ptl = pmd_trans_huge_lock(pmd, vma);
vma              5894 mm/memcontrol.c 		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
vma              5922 mm/memcontrol.c 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
vma              5931 mm/memcontrol.c 		switch (get_mctgt_type(vma, addr, ptent, &target)) {
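get_mctgt_type() in mm/memcontrol.c above dispatches on the current PTE state to one of the three mc_handle_*_pte() helpers listed. A simplified sketch of just that dispatch; the real function additionally classifies the returned page and handles device and swap targets:

	static struct page *mc_lookup_page(struct vm_area_struct *vma,
					   unsigned long addr, pte_t ptent,
					   swp_entry_t *ent)
	{
		if (pte_present(ptent))
			return mc_handle_present_pte(vma, addr, ptent);
		if (is_swap_pte(ptent))
			return mc_handle_swap_pte(vma, ptent, ent);
		if (pte_none(ptent))	/* possibly in page cache, not mapped */
			return mc_handle_file_pte(vma, addr, ptent, ent);
		return NULL;
	}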
vma               265 mm/memory-failure.c 		struct vm_area_struct *vma)
vma               267 mm/memory-failure.c 	unsigned long address = vma_address(page, vma);
vma               274 mm/memory-failure.c 	pgd = pgd_offset(vma->vm_mm, address);
vma               309 mm/memory-failure.c 		       struct vm_area_struct *vma,
vma               325 mm/memory-failure.c 	tk->addr = page_address_in_vma(p, vma);
vma               327 mm/memory-failure.c 		tk->size_shift = dev_pagemap_mapping_shift(p, vma);
vma               441 mm/memory-failure.c 	struct vm_area_struct *vma;
vma               460 mm/memory-failure.c 			vma = vmac->vma;
vma               461 mm/memory-failure.c 			if (!page_mapped_in_vma(page, vma))
vma               463 mm/memory-failure.c 			if (vma->vm_mm == t->mm)
vma               464 mm/memory-failure.c 				add_to_kill(t, page, vma, to_kill, tkc);
vma               477 mm/memory-failure.c 	struct vm_area_struct *vma;
vma               489 mm/memory-failure.c 		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
vma               498 mm/memory-failure.c 			if (vma->vm_mm == t->mm)
vma               499 mm/memory-failure.c 				add_to_kill(t, page, vma, to_kill, tkc);
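The mm/memory-failure.c kill-list code above finds every VMA mapping a poisoned file page through the mapping's interval tree. A minimal sketch of that reverse-map walk; i_mmap locking is elided, and for_each_mapping_vma() is an illustrative name:

	static void for_each_mapping_vma(struct page *page,
					 struct address_space *mapping)
	{
		struct vm_area_struct *vma;
		pgoff_t pgoff = page_to_pgoff(page);

		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
			/* vma maps the file range containing the page;
			 * vma_address(page, vma) yields the user address. */
		}
	}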
vma               370 mm/memory.c    void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
vma               373 mm/memory.c    	while (vma) {
vma               374 mm/memory.c    		struct vm_area_struct *next = vma->vm_next;
vma               375 mm/memory.c    		unsigned long addr = vma->vm_start;
vma               381 mm/memory.c    		unlink_anon_vmas(vma);
vma               382 mm/memory.c    		unlink_file_vma(vma);
vma               384 mm/memory.c    		if (is_vm_hugetlb_page(vma)) {
vma               385 mm/memory.c    			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
vma               391 mm/memory.c    			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
vma               393 mm/memory.c    				vma = next;
vma               394 mm/memory.c    				next = vma->vm_next;
vma               395 mm/memory.c    				unlink_anon_vmas(vma);
vma               396 mm/memory.c    				unlink_file_vma(vma);
vma               398 mm/memory.c    			free_pgd_range(tlb, addr, vma->vm_end,
vma               401 mm/memory.c    		vma = next;
vma               481 mm/memory.c    static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
vma               484 mm/memory.c    	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
vma               513 mm/memory.c    	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
vma               514 mm/memory.c    	index = linear_page_index(vma, addr);
vma               522 mm/memory.c    		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
vma               524 mm/memory.c    		 vma->vm_file,
vma               525 mm/memory.c    		 vma->vm_ops ? vma->vm_ops->fault : NULL,
vma               526 mm/memory.c    		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
vma               574 mm/memory.c    struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
vma               582 mm/memory.c    		if (vma->vm_ops && vma->vm_ops->find_special_page)
vma               583 mm/memory.c    			return vma->vm_ops->find_special_page(vma, addr);
vma               584 mm/memory.c    		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
vma               591 mm/memory.c    		print_bad_pte(vma, addr, pte, NULL);
vma               597 mm/memory.c    	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
vma               598 mm/memory.c    		if (vma->vm_flags & VM_MIXEDMAP) {
vma               604 mm/memory.c    			off = (addr - vma->vm_start) >> PAGE_SHIFT;
vma               605 mm/memory.c    			if (pfn == vma->vm_pgoff + off)
vma               607 mm/memory.c    			if (!is_cow_mapping(vma->vm_flags))
vma               617 mm/memory.c    		print_bad_pte(vma, addr, pte, NULL);
vma               630 mm/memory.c    struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
vma               640 mm/memory.c    	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
vma               641 mm/memory.c    		if (vma->vm_flags & VM_MIXEDMAP) {
vma               647 mm/memory.c    			off = (addr - vma->vm_start) >> PAGE_SHIFT;
vma               648 mm/memory.c    			if (pfn == vma->vm_pgoff + off)
vma               650 mm/memory.c    			if (!is_cow_mapping(vma->vm_flags))
vma               679 mm/memory.c    		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
vma               682 mm/memory.c    	unsigned long vm_flags = vma->vm_flags;
vma               770 mm/memory.c    	page = vm_normal_page(vma, addr, pte);
vma               785 mm/memory.c    		   pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
vma               824 mm/memory.c    							vma, addr, rss);
vma               848 mm/memory.c    		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
vma               863 mm/memory.c    			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, vma);
vma               865 mm/memory.c    					    dst_pmd, src_pmd, addr, vma);
vma               875 mm/memory.c    						vma, addr, next))
vma               882 mm/memory.c    		p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma,
vma               897 mm/memory.c    			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, vma);
vma               899 mm/memory.c    					    dst_pud, src_pud, addr, vma);
vma               909 mm/memory.c    						vma, addr, next))
vma               916 mm/memory.c    		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
vma               931 mm/memory.c    						vma, addr, next))
vma               938 mm/memory.c    		struct vm_area_struct *vma)
vma               942 mm/memory.c    	unsigned long addr = vma->vm_start;
vma               943 mm/memory.c    	unsigned long end = vma->vm_end;
vma               954 mm/memory.c    	if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
vma               955 mm/memory.c    			!vma->anon_vma)
vma               958 mm/memory.c    	if (is_vm_hugetlb_page(vma))
vma               959 mm/memory.c    		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
vma               961 mm/memory.c    	if (unlikely(vma->vm_flags & VM_PFNMAP)) {
vma               966 mm/memory.c    		ret = track_pfn_copy(vma);
vma               977 mm/memory.c    	is_cow = is_cow_mapping(vma->vm_flags);
vma               981 mm/memory.c    					0, vma, src_mm, addr, end);
vma               993 mm/memory.c    					    vma, addr, next))) {
vma              1005 mm/memory.c    				struct vm_area_struct *vma, pmd_t *pmd,
vma              1035 mm/memory.c    			page = vm_normal_page(vma, addr, ptent);
vma              1058 mm/memory.c    				    likely(!(vma->vm_flags & VM_SEQ_READ)))
vma              1064 mm/memory.c    				print_bad_pte(vma, addr, ptent, page);
vma              1108 mm/memory.c    			print_bad_pte(vma, addr, ptent, NULL);
vma              1140 mm/memory.c    				struct vm_area_struct *vma, pud_t *pud,
vma              1152 mm/memory.c    				__split_huge_pmd(vma, pmd, addr, false, NULL);
vma              1153 mm/memory.c    			else if (zap_huge_pmd(tlb, vma, pmd, addr))
vma              1166 mm/memory.c    		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
vma              1175 mm/memory.c    				struct vm_area_struct *vma, p4d_t *p4d,
vma              1187 mm/memory.c    				VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma);
vma              1188 mm/memory.c    				split_huge_pud(vma, pud, addr);
vma              1189 mm/memory.c    			} else if (zap_huge_pud(tlb, vma, pud, addr))
vma              1195 mm/memory.c    		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
vma              1204 mm/memory.c    				struct vm_area_struct *vma, pgd_t *pgd,
vma              1216 mm/memory.c    		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
vma              1223 mm/memory.c    			     struct vm_area_struct *vma,
vma              1231 mm/memory.c    	tlb_start_vma(tlb, vma);
vma              1232 mm/memory.c    	pgd = pgd_offset(vma->vm_mm, addr);
vma              1237 mm/memory.c    		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
vma              1239 mm/memory.c    	tlb_end_vma(tlb, vma);
vma              1244 mm/memory.c    		struct vm_area_struct *vma, unsigned long start_addr,
vma              1248 mm/memory.c    	unsigned long start = max(vma->vm_start, start_addr);
vma              1251 mm/memory.c    	if (start >= vma->vm_end)
vma              1253 mm/memory.c    	end = min(vma->vm_end, end_addr);
vma              1254 mm/memory.c    	if (end <= vma->vm_start)
vma              1257 mm/memory.c    	if (vma->vm_file)
vma              1258 mm/memory.c    		uprobe_munmap(vma, start, end);
vma              1260 mm/memory.c    	if (unlikely(vma->vm_flags & VM_PFNMAP))
vma              1261 mm/memory.c    		untrack_pfn(vma, 0, 0);
vma              1264 mm/memory.c    		if (unlikely(is_vm_hugetlb_page(vma))) {
vma              1276 mm/memory.c    			if (vma->vm_file) {
vma              1277 mm/memory.c    				i_mmap_lock_write(vma->vm_file->f_mapping);
vma              1278 mm/memory.c    				__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
vma              1279 mm/memory.c    				i_mmap_unlock_write(vma->vm_file->f_mapping);
vma              1282 mm/memory.c    			unmap_page_range(tlb, vma, start, end, details);
vma              1305 mm/memory.c    		struct vm_area_struct *vma, unsigned long start_addr,
vma              1310 mm/memory.c    	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
vma              1313 mm/memory.c    	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
vma              1314 mm/memory.c    		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
vma              1326 mm/memory.c    void zap_page_range(struct vm_area_struct *vma, unsigned long start,
vma              1333 mm/memory.c    	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
vma              1335 mm/memory.c    	tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);
vma              1336 mm/memory.c    	update_hiwater_rss(vma->vm_mm);
vma              1338 mm/memory.c    	for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
vma              1339 mm/memory.c    		unmap_single_vma(&tlb, vma, start, range.end, NULL);
vma              1353 mm/memory.c    static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
vma              1360 mm/memory.c    	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
vma              1362 mm/memory.c    	tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end);
vma              1363 mm/memory.c    	update_hiwater_rss(vma->vm_mm);
vma              1365 mm/memory.c    	unmap_single_vma(&tlb, vma, address, range.end, details);
vma              1381 mm/memory.c    void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
vma              1384 mm/memory.c    	if (address < vma->vm_start || address + size > vma->vm_end ||
vma              1385 mm/memory.c    	    		!(vma->vm_flags & VM_PFNMAP))
vma              1388 mm/memory.c    	zap_page_range_single(vma, address, size, NULL);
vma              1422 mm/memory.c    static int insert_page(struct vm_area_struct *vma, unsigned long addr,
vma              1425 mm/memory.c    	struct mm_struct *mm = vma->vm_mm;
vma              1484 mm/memory.c    int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
vma              1487 mm/memory.c    	if (addr < vma->vm_start || addr >= vma->vm_end)
vma              1491 mm/memory.c    	if (!(vma->vm_flags & VM_MIXEDMAP)) {
vma              1492 mm/memory.c    		BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
vma              1493 mm/memory.c    		BUG_ON(vma->vm_flags & VM_PFNMAP);
vma              1494 mm/memory.c    		vma->vm_flags |= VM_MIXEDMAP;
vma              1496 mm/memory.c    	return insert_page(vma, addr, page, vma->vm_page_prot);
vma              1511 mm/memory.c    static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
vma              1514 mm/memory.c    	unsigned long count = vma_pages(vma);
vma              1515 mm/memory.c    	unsigned long uaddr = vma->vm_start;
vma              1527 mm/memory.c    		ret = vm_insert_page(vma, uaddr, pages[offset + i]);
vma              1554 mm/memory.c    int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
vma              1557 mm/memory.c    	return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
vma              1574 mm/memory.c    int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
vma              1577 mm/memory.c    	return __vm_map_pages(vma, pages, num, 0);
vma              1581 mm/memory.c    static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
vma              1584 mm/memory.c    	struct mm_struct *mm = vma->vm_mm;
vma              1608 mm/memory.c    			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
vma              1609 mm/memory.c    			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
vma              1610 mm/memory.c    				update_mmu_cache(vma, addr, pte);
vma              1623 mm/memory.c    		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
vma              1627 mm/memory.c    	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
vma              1652 mm/memory.c    vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
vma              1661 mm/memory.c    	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
vma              1662 mm/memory.c    	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
vma              1664 mm/memory.c    	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
vma              1665 mm/memory.c    	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
vma              1667 mm/memory.c    	if (addr < vma->vm_start || addr >= vma->vm_end)
vma              1673 mm/memory.c    	track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
vma              1675 mm/memory.c    	return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
vma              1700 mm/memory.c    vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
vma              1703 mm/memory.c    	return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
vma              1707 mm/memory.c    static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
vma              1710 mm/memory.c    	if (vma->vm_flags & VM_MIXEDMAP)
vma              1721 mm/memory.c    static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
vma              1724 mm/memory.c    	pgprot_t pgprot = vma->vm_page_prot;
vma              1727 mm/memory.c    	BUG_ON(!vm_mixed_ok(vma, pfn));
vma              1729 mm/memory.c    	if (addr < vma->vm_start || addr >= vma->vm_end)
vma              1732 mm/memory.c    	track_pfn_insert(vma, &pgprot, pfn);
vma              1754 mm/memory.c    		err = insert_page(vma, addr, page, pgprot);
vma              1756 mm/memory.c    		return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
vma              1767 mm/memory.c    vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
vma              1770 mm/memory.c    	return __vm_insert_mixed(vma, addr, pfn, false);
vma              1779 mm/memory.c    vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
vma              1782 mm/memory.c    	return __vm_insert_mixed(vma, addr, pfn, true);
vma              1896 mm/memory.c    int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
vma              1902 mm/memory.c    	struct mm_struct *mm = vma->vm_mm;
vma              1924 mm/memory.c    	if (is_cow_mapping(vma->vm_flags)) {
vma              1925 mm/memory.c    		if (addr != vma->vm_start || end != vma->vm_end)
vma              1927 mm/memory.c    		vma->vm_pgoff = pfn;
vma              1930 mm/memory.c    	err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
vma              1934 mm/memory.c    	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma              1939 mm/memory.c    	flush_cache_range(vma, addr, end);
vma              1949 mm/memory.c    		untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));
vma              1970 mm/memory.c    int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
vma              1989 mm/memory.c    	if (vma->vm_pgoff > pages)
vma              1991 mm/memory.c    	pfn += vma->vm_pgoff;
vma              1992 mm/memory.c    	pages -= vma->vm_pgoff;
vma              1995 mm/memory.c    	vm_len = vma->vm_end - vma->vm_start;
vma              2000 mm/memory.c    	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
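vm_iomap_memory() above is a convenience wrapper over the remap_pfn_range()/io_remap_pfn_range() pattern that driver ->mmap handlers otherwise open-code. A sketch of that open-coded form; MYDEV_PHYS is a made-up physical base for illustration, and the bounds checks vm_iomap_memory() performs against vm_pgoff are omitted:

	#define MYDEV_PHYS	0xfd000000UL	/* hypothetical device BAR */

	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long size = vma->vm_end - vma->vm_start;
		unsigned long pfn = (MYDEV_PHYS >> PAGE_SHIFT) + vma->vm_pgoff;

		/* map the device range into the caller's VMA */
		return remap_pfn_range(vma, vma->vm_start, pfn, size,
				       vma->vm_page_prot);
	}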
vma              2148 mm/memory.c    static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
vma              2173 mm/memory.c    		copy_user_highpage(dst, src, va, vma);
vma              2176 mm/memory.c    static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
vma              2178 mm/memory.c    	struct file *vm_file = vma->vm_file;
vma              2204 mm/memory.c    	if (vmf->vma->vm_file &&
vma              2205 mm/memory.c    	    IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
vma              2208 mm/memory.c    	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
vma              2232 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vma              2236 mm/memory.c    	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
vma              2250 mm/memory.c    		file_update_time(vma->vm_file);
vma              2286 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vma              2297 mm/memory.c    	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
vma              2299 mm/memory.c    	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
vma              2300 mm/memory.c    	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
vma              2301 mm/memory.c    		update_mmu_cache(vma, vmf->address, vmf->pte);
vma              2323 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vma              2324 mm/memory.c    	struct mm_struct *mm = vma->vm_mm;
vma              2332 mm/memory.c    	if (unlikely(anon_vma_prepare(vma)))
vma              2336 mm/memory.c    		new_page = alloc_zeroed_user_highpage_movable(vma,
vma              2341 mm/memory.c    		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
vma              2345 mm/memory.c    		cow_user_page(new_page, old_page, vmf->address, vma);
vma              2353 mm/memory.c    	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
vma              2372 mm/memory.c    		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
vma              2373 mm/memory.c    		entry = mk_pte(new_page, vma->vm_page_prot);
vma              2374 mm/memory.c    		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
vma              2381 mm/memory.c    		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
vma              2382 mm/memory.c    		page_add_new_anon_rmap(new_page, vma, vmf->address, false);
vma              2384 mm/memory.c    		lru_cache_add_active_or_unevictable(new_page, vma);
vma              2391 mm/memory.c    		update_mmu_cache(vma, vmf->address, vmf->pte);
vma              2439 mm/memory.c    		if (page_copied && (vma->vm_flags & VM_LOCKED)) {
vma              2474 mm/memory.c    	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
vma              2475 mm/memory.c    	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
vma              2495 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vma              2497 mm/memory.c    	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
vma              2502 mm/memory.c    		ret = vma->vm_ops->pfn_mkwrite(vmf);
vma              2514 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vma              2519 mm/memory.c    	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
vma              2566 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vma              2568 mm/memory.c    	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
vma              2577 mm/memory.c    		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
vma              2598 mm/memory.c    			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
vma              2609 mm/memory.c    			bool reused = reuse_ksm_page(vmf->page, vmf->vma,
vma              2626 mm/memory.c    				page_move_anon_rmap(vmf->page, vma);
vma              2633 mm/memory.c    	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
vma              2647 mm/memory.c    static void unmap_mapping_range_vma(struct vm_area_struct *vma,
vma              2651 mm/memory.c    	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
vma              2657 mm/memory.c    	struct vm_area_struct *vma;
vma              2660 mm/memory.c    	vma_interval_tree_foreach(vma, root,
vma              2663 mm/memory.c    		vba = vma->vm_pgoff;
vma              2664 mm/memory.c    		vea = vba + vma_pages(vma) - 1;
vma              2672 mm/memory.c    		unmap_mapping_range_vma(vma,
vma              2673 mm/memory.c    			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
vma              2674 mm/memory.c    			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
vma              2753 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vma              2762 mm/memory.c    	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
vma              2768 mm/memory.c    			migration_entry_wait(vma->vm_mm, vmf->pmd,
vma              2776 mm/memory.c    			print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
vma              2784 mm/memory.c    	page = lookup_swap_cache(entry, vma, vmf->address);
vma              2793 mm/memory.c    			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
vma              2813 mm/memory.c    			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
vma              2824 mm/memory.c    		count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
vma              2835 mm/memory.c    	locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
vma              2853 mm/memory.c    	page = ksm_might_need_to_copy(page, vma, vmf->address);
vma              2860 mm/memory.c    	if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL,
vma              2869 mm/memory.c    	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
vma              2889 mm/memory.c    	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
vma              2890 mm/memory.c    	dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
vma              2891 mm/memory.c    	pte = mk_pte(page, vma->vm_page_prot);
vma              2893 mm/memory.c    		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
vma              2898 mm/memory.c    	flush_icache_page(vma, page);
vma              2901 mm/memory.c    	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
vma              2902 mm/memory.c    	arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
vma              2907 mm/memory.c    		page_add_new_anon_rmap(page, vma, vmf->address, false);
vma              2909 mm/memory.c    		lru_cache_add_active_or_unevictable(page, vma);
vma              2911 mm/memory.c    		do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
vma              2918 mm/memory.c    	    (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
vma              2942 mm/memory.c    	update_mmu_cache(vma, vmf->address, vmf->pte);
vma              2968 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vma              2975 mm/memory.c    	if (vma->vm_flags & VM_SHARED)
vma              2988 mm/memory.c    	if (pte_alloc(vma->vm_mm, vmf->pmd))
vma              2997 mm/memory.c    			!mm_forbids_zeropage(vma->vm_mm)) {
vma              2999 mm/memory.c    						vma->vm_page_prot));
vma              3000 mm/memory.c    		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
vma              3004 mm/memory.c    		ret = check_stable_address_space(vma->vm_mm);
vma              3008 mm/memory.c    		if (userfaultfd_missing(vma)) {
vma              3016 mm/memory.c    	if (unlikely(anon_vma_prepare(vma)))
vma              3018 mm/memory.c    	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
vma              3022 mm/memory.c    	if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL, &memcg,
vma              3033 mm/memory.c    	entry = mk_pte(page, vma->vm_page_prot);
vma              3034 mm/memory.c    	if (vma->vm_flags & VM_WRITE)
vma              3037 mm/memory.c    	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
vma              3042 mm/memory.c    	ret = check_stable_address_space(vma->vm_mm);
vma              3047 mm/memory.c    	if (userfaultfd_missing(vma)) {
vma              3054 mm/memory.c    	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
vma              3055 mm/memory.c    	page_add_new_anon_rmap(page, vma, vmf->address, false);
vma              3057 mm/memory.c    	lru_cache_add_active_or_unevictable(page, vma);
vma              3059 mm/memory.c    	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
vma              3062 mm/memory.c    	update_mmu_cache(vma, vmf->address, vmf->pte);
vma              3083 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vma              3102 mm/memory.c    		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
vma              3108 mm/memory.c    	ret = vma->vm_ops->fault(vmf);
vma              3142 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vma              3147 mm/memory.c    		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
vma              3153 mm/memory.c    		mm_inc_nr_ptes(vma->vm_mm);
vma              3154 mm/memory.c    		pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
vma              3157 mm/memory.c    	} else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
vma              3184 mm/memory.c    	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
vma              3192 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vma              3194 mm/memory.c    	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
vma              3199 mm/memory.c    	mm_inc_nr_ptes(vma->vm_mm);
vma              3205 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vma              3212 mm/memory.c    	if (!transhuge_vma_suitable(vma, haddr))
vma              3223 mm/memory.c    		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
vma              3229 mm/memory.c    	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
vma              3234 mm/memory.c    		flush_icache_page(vma, page + i);
vma              3236 mm/memory.c    	entry = mk_huge_pmd(page, vma->vm_page_prot);
vma              3238 mm/memory.c    		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
vma              3240 mm/memory.c    	add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
vma              3248 mm/memory.c    	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
vma              3250 mm/memory.c    	update_mmu_cache_pmd(vma, haddr, vmf->pmd);
vma              3286 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vma              3311 mm/memory.c    	flush_icache_page(vma, page);
vma              3312 mm/memory.c    	entry = mk_pte(page, vma->vm_page_prot);
vma              3314 mm/memory.c    		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
vma              3316 mm/memory.c    	if (write && !(vma->vm_flags & VM_SHARED)) {
vma              3317 mm/memory.c    		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
vma              3318 mm/memory.c    		page_add_new_anon_rmap(page, vma, vmf->address, false);
vma              3320 mm/memory.c    		lru_cache_add_active_or_unevictable(page, vma);
vma              3322 mm/memory.c    		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
vma              3325 mm/memory.c    	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
vma              3328 mm/memory.c    	update_mmu_cache(vma, vmf->address, vmf->pte);
vma              3356 mm/memory.c    	    !(vmf->vma->vm_flags & VM_SHARED))
vma              3365 mm/memory.c    	if (!(vmf->vma->vm_flags & VM_SHARED))
vma              3366 mm/memory.c    		ret = check_stable_address_space(vmf->vma->vm_mm);
vma              3445 mm/memory.c    	vmf->address = max(address & mask, vmf->vma->vm_start);
vma              3456 mm/memory.c    	end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
vma              3460 mm/memory.c    		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
vma              3466 mm/memory.c    	vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
vma              3491 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vma              3499 mm/memory.c    	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
vma              3518 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vma              3521 mm/memory.c    	if (unlikely(anon_vma_prepare(vma)))
vma              3524 mm/memory.c    	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
vma              3528 mm/memory.c    	if (mem_cgroup_try_charge_delay(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
vma              3540 mm/memory.c    	copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
vma              3557 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vma              3568 mm/memory.c    	if (vma->vm_ops->page_mkwrite) {
vma              3600 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vma              3601 mm/memory.c    	struct mm_struct *vm_mm = vma->vm_mm;
vma              3607 mm/memory.c    	if (!vma->vm_ops->fault) {
vma              3615 mm/memory.c    			vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
vma              3635 mm/memory.c    	else if (!(vma->vm_flags & VM_SHARED))
vma              3648 mm/memory.c    static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
vma              3660 mm/memory.c    	return mpol_misplaced(page, vma, addr);
vma              3665 mm/memory.c    	struct vm_area_struct *vma = vmf->vma;
vma              3680 mm/memory.c    	vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
vma              3691 mm/memory.c    	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
vma              3692 mm/memory.c    	pte = pte_modify(old_pte, vma->vm_page_prot);
vma              3696 mm/memory.c    	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
vma              3697 mm/memory.c    	update_mmu_cache(vma, vmf->address, vmf->pte);
vma              3699 mm/memory.c    	page = vm_normal_page(vma, vmf->address, pte);
vma              3726 mm/memory.c    	if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
vma              3731 mm/memory.c    	target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
vma              3740 mm/memory.c    	migrated = migrate_misplaced_page(page, vma, target_nid);
vma              3755 mm/memory.c    	if (vma_is_anonymous(vmf->vma))
vma              3757 mm/memory.c    	if (vmf->vma->vm_ops->huge_fault)
vma              3758 mm/memory.c    		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
vma              3765 mm/memory.c    	if (vma_is_anonymous(vmf->vma))
vma              3767 mm/memory.c    	if (vmf->vma->vm_ops->huge_fault)
vma              3768 mm/memory.c    		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
vma              3771 mm/memory.c    	VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma);
vma              3772 mm/memory.c    	__split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
vma              3777 mm/memory.c    static inline bool vma_is_accessible(struct vm_area_struct *vma)
vma              3779 mm/memory.c    	return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
vma              3786 mm/memory.c    	if (vma_is_anonymous(vmf->vma))
vma              3788 mm/memory.c    	if (vmf->vma->vm_ops->huge_fault)
vma              3789 mm/memory.c    		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
vma              3798 mm/memory.c    	if (vma_is_anonymous(vmf->vma))
vma              3800 mm/memory.c    	if (vmf->vma->vm_ops->huge_fault)
vma              3801 mm/memory.c    		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
vma              3862 mm/memory.c    		if (vma_is_anonymous(vmf->vma))
vma              3871 mm/memory.c    	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
vma              3874 mm/memory.c    	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
vma              3885 mm/memory.c    	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
vma              3887 mm/memory.c    		update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
vma              3896 mm/memory.c    			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
vma              3909 mm/memory.c    static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
vma              3913 mm/memory.c    		.vma = vma,
vma              3916 mm/memory.c    		.pgoff = linear_page_index(vma, address),
vma              3917 mm/memory.c    		.gfp_mask = __get_fault_gfp_mask(vma),
vma              3920 mm/memory.c    	struct mm_struct *mm = vma->vm_mm;
vma              3933 mm/memory.c    	if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
vma              3959 mm/memory.c    	if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
vma              3975 mm/memory.c    			if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
vma              3998 mm/memory.c    vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
vma              4006 mm/memory.c    	count_memcg_event_mm(vma->vm_mm, PGFAULT);
vma              4011 mm/memory.c    	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
vma              4023 mm/memory.c    	if (unlikely(is_vm_hugetlb_page(vma)))
vma              4024 mm/memory.c    		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
vma              4026 mm/memory.c    		ret = __handle_mm_fault(vma, address, flags);
vma              4235 mm/memory.c    int follow_pfn(struct vm_area_struct *vma, unsigned long address,
vma              4242 mm/memory.c    	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
vma              4245 mm/memory.c    	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
vma              4255 mm/memory.c    int follow_phys(struct vm_area_struct *vma,
vma              4263 mm/memory.c    	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
vma              4266 mm/memory.c    	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
vma              4283 mm/memory.c    int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
vma              4291 mm/memory.c    	if (follow_phys(vma, addr, write, &prot, &phys_addr))
vma              4316 mm/memory.c    	struct vm_area_struct *vma;
vma              4330 mm/memory.c    				gup_flags, &page, &vma, NULL);
vma              4339 mm/memory.c    			vma = find_vma(mm, addr);
vma              4340 mm/memory.c    			if (!vma || vma->vm_start > addr)
vma              4342 mm/memory.c    			if (vma->vm_ops && vma->vm_ops->access)
vma              4343 mm/memory.c    				ret = vma->vm_ops->access(vma, addr, buf,
vma              4357 mm/memory.c    				copy_to_user_page(vma, page, addr,
vma              4361 mm/memory.c    				copy_from_user_page(vma, page, addr,
vma              4423 mm/memory.c    	struct vm_area_struct *vma;
vma              4431 mm/memory.c    	vma = find_vma(mm, ip);
vma              4432 mm/memory.c    	if (vma && vma->vm_file) {
vma              4433 mm/memory.c    		struct file *f = vma->vm_file;
vma              4442 mm/memory.c    					vma->vm_start,
vma              4443 mm/memory.c    					vma->vm_end - vma->vm_start);
vma              4562 mm/memory.c    				    struct vm_area_struct *vma,
vma              4571 mm/memory.c    		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
vma              4582 mm/memory.c    	struct vm_area_struct *vma;
vma              4590 mm/memory.c    			   addr, copy_arg->vma);
vma              4594 mm/memory.c    			 unsigned long addr_hint, struct vm_area_struct *vma,
vma              4602 mm/memory.c    		.vma = vma,
vma              4606 mm/memory.c    		copy_user_gigantic_page(dst, src, addr, vma,
vma               380 mm/mempolicy.c 	struct vm_area_struct *vma;
vma               383 mm/mempolicy.c 	for (vma = mm->mmap; vma; vma = vma->vm_next)
vma               384 mm/mempolicy.c 		mpol_rebind_policy(vma->vm_policy, new);
vma               456 mm/mempolicy.c 		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
vma               466 mm/mempolicy.c 		if (!vma_migratable(walk->vma) ||
vma               493 mm/mempolicy.c 	struct vm_area_struct *vma = walk->vma;
vma               502 mm/mempolicy.c 	ptl = pmd_trans_huge_lock(pmd, vma);
vma               517 mm/mempolicy.c 		page = vm_normal_page(vma, addr, *pte);
vma               530 mm/mempolicy.c 			if (!vma_migratable(vma)) {
vma               565 mm/mempolicy.c 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
vma               594 mm/mempolicy.c unsigned long change_prot_numa(struct vm_area_struct *vma,
vma               599 mm/mempolicy.c 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
vma               606 mm/mempolicy.c static unsigned long change_prot_numa(struct vm_area_struct *vma,
vma               616 mm/mempolicy.c 	struct vm_area_struct *vma = walk->vma;
vma               618 mm/mempolicy.c 	unsigned long endvma = vma->vm_end;
vma               625 mm/mempolicy.c 	if (!vma_migratable(vma) &&
vma               631 mm/mempolicy.c 	if (vma->vm_start > start)
vma               632 mm/mempolicy.c 		start = vma->vm_start;
vma               635 mm/mempolicy.c 		if (!vma->vm_next && vma->vm_end < end)
vma               637 mm/mempolicy.c 		if (qp->prev && qp->prev->vm_end < vma->vm_start)
vma               641 mm/mempolicy.c 	qp->prev = vma;
vma               645 mm/mempolicy.c 		if (!is_vm_hugetlb_page(vma) &&
vma               646 mm/mempolicy.c 			(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
vma               647 mm/mempolicy.c 			!(vma->vm_flags & VM_MIXEDMAP))
vma               648 mm/mempolicy.c 			change_prot_numa(vma, start, endvma);
vma               698 mm/mempolicy.c static int vma_replace_policy(struct vm_area_struct *vma,
vma               706 mm/mempolicy.c 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
vma               707 mm/mempolicy.c 		 vma->vm_ops, vma->vm_file,
vma               708 mm/mempolicy.c 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
vma               714 mm/mempolicy.c 	if (vma->vm_ops && vma->vm_ops->set_policy) {
vma               715 mm/mempolicy.c 		err = vma->vm_ops->set_policy(vma, new);
vma               720 mm/mempolicy.c 	old = vma->vm_policy;
vma               721 mm/mempolicy.c 	vma->vm_policy = new; /* protected by mmap_sem */
vma               736 mm/mempolicy.c 	struct vm_area_struct *vma;
vma               742 mm/mempolicy.c 	vma = find_vma(mm, start);
vma               743 mm/mempolicy.c 	if (!vma || vma->vm_start > start)
vma               746 mm/mempolicy.c 	prev = vma->vm_prev;
vma               747 mm/mempolicy.c 	if (start > vma->vm_start)
vma               748 mm/mempolicy.c 		prev = vma;
vma               750 mm/mempolicy.c 	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
vma               751 mm/mempolicy.c 		next = vma->vm_next;
vma               752 mm/mempolicy.c 		vmstart = max(start, vma->vm_start);
vma               753 mm/mempolicy.c 		vmend   = min(end, vma->vm_end);
vma               755 mm/mempolicy.c 		if (mpol_equal(vma_policy(vma), new_pol))
vma               758 mm/mempolicy.c 		pgoff = vma->vm_pgoff +
vma               759 mm/mempolicy.c 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
vma               760 mm/mempolicy.c 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
vma               761 mm/mempolicy.c 				 vma->anon_vma, vma->vm_file, pgoff,
vma               762 mm/mempolicy.c 				 new_pol, vma->vm_userfaultfd_ctx);
vma               764 mm/mempolicy.c 			vma = prev;
vma               765 mm/mempolicy.c 			next = vma->vm_next;
vma               766 mm/mempolicy.c 			if (mpol_equal(vma_policy(vma), new_pol))
vma               771 mm/mempolicy.c 		if (vma->vm_start != vmstart) {
vma               772 mm/mempolicy.c 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
vma               776 mm/mempolicy.c 		if (vma->vm_end != vmend) {
vma               777 mm/mempolicy.c 			err = split_vma(vma->vm_mm, vma, vmend, 0);
vma               782 mm/mempolicy.c 		err = vma_replace_policy(vma, new_pol);
vma               876 mm/mempolicy.c 	struct vm_area_struct *vma = NULL;
vma               900 mm/mempolicy.c 		vma = find_vma_intersection(mm, addr, addr+1);
vma               901 mm/mempolicy.c 		if (!vma) {
vma               905 mm/mempolicy.c 		if (vma->vm_ops && vma->vm_ops->get_policy)
vma               906 mm/mempolicy.c 			pol = vma->vm_ops->get_policy(vma, addr);
vma               908 mm/mempolicy.c 			pol = vma->vm_policy;
vma               924 mm/mempolicy.c 			vma = NULL;
vma               960 mm/mempolicy.c 	if (vma)
vma              1167 mm/mempolicy.c 	struct vm_area_struct *vma;
vma              1170 mm/mempolicy.c 	vma = find_vma(current->mm, start);
vma              1171 mm/mempolicy.c 	while (vma) {
vma              1172 mm/mempolicy.c 		address = page_address_in_vma(page, vma);
vma              1175 mm/mempolicy.c 		vma = vma->vm_next;
vma              1180 mm/mempolicy.c 				vma, address);
vma              1184 mm/mempolicy.c 		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
vma              1195 mm/mempolicy.c 			vma, address);
vma              1702 mm/mempolicy.c struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
vma              1707 mm/mempolicy.c 	if (vma) {
vma              1708 mm/mempolicy.c 		if (vma->vm_ops && vma->vm_ops->get_policy) {
vma              1709 mm/mempolicy.c 			pol = vma->vm_ops->get_policy(vma, addr);
vma              1710 mm/mempolicy.c 		} else if (vma->vm_policy) {
vma              1711 mm/mempolicy.c 			pol = vma->vm_policy;
vma              1739 mm/mempolicy.c static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
vma              1742 mm/mempolicy.c 	struct mempolicy *pol = __get_vma_policy(vma, addr);
vma              1750 mm/mempolicy.c bool vma_policy_mof(struct vm_area_struct *vma)
vma              1754 mm/mempolicy.c 	if (vma->vm_ops && vma->vm_ops->get_policy) {
vma              1757 mm/mempolicy.c 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
vma              1765 mm/mempolicy.c 	pol = vma->vm_policy;
vma              1906 mm/mempolicy.c 		 struct vm_area_struct *vma, unsigned long addr, int shift)
vma              1908 mm/mempolicy.c 	if (vma) {
vma              1919 mm/mempolicy.c 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
vma              1920 mm/mempolicy.c 		off += (addr - vma->vm_start) >> shift;
vma              1942 mm/mempolicy.c int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
vma              1947 mm/mempolicy.c 	*mpol = get_vma_policy(vma, addr);
vma              1951 mm/mempolicy.c 		nid = interleave_nid(*mpol, vma, addr,
vma              1952 mm/mempolicy.c 					huge_page_shift(hstate_vma(vma)));
vma              2098 mm/mempolicy.c alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
vma              2106 mm/mempolicy.c 	pol = get_vma_policy(vma, addr);
vma              2111 mm/mempolicy.c 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
vma              2385 mm/mempolicy.c int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
vma              2396 mm/mempolicy.c 	pol = get_vma_policy(vma, addr);
vma              2402 mm/mempolicy.c 		pgoff = vma->vm_pgoff;
vma              2403 mm/mempolicy.c 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
vma              2620 mm/mempolicy.c 			struct vm_area_struct *vma, struct mempolicy *npol)
vma              2624 mm/mempolicy.c 	unsigned long sz = vma_pages(vma);
vma              2627 mm/mempolicy.c 		 vma->vm_pgoff,
vma              2633 mm/mempolicy.c 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
vma              2637 mm/mempolicy.c 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
vma               204 mm/migrate.c   static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
vma               209 mm/migrate.c   		.vma = vma,
vma               223 mm/migrate.c   				linear_page_index(vma, pvmw.address);
vma               235 mm/migrate.c   		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
vma               244 mm/migrate.c   			pte = maybe_mkwrite(pte, vma);
vma               256 mm/migrate.c   			pte = arch_make_huge_pte(pte, vma, new, 0);
vma               257 mm/migrate.c   			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
vma               259 mm/migrate.c   				hugepage_add_anon_rmap(new, vma, pvmw.address);
vma               265 mm/migrate.c   			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
vma               268 mm/migrate.c   				page_add_anon_rmap(new, vma, pvmw.address, false);
vma               272 mm/migrate.c   		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
vma               279 mm/migrate.c   		update_mmu_cache(vma, pvmw.address, pvmw.pte);
vma               347 mm/migrate.c   void migration_entry_wait_huge(struct vm_area_struct *vma,
vma               350 mm/migrate.c   	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
vma              1528 mm/migrate.c   	struct vm_area_struct *vma;
vma              1535 mm/migrate.c   	vma = find_vma(mm, addr);
vma              1536 mm/migrate.c   	if (!vma || addr < vma->vm_start || !vma_migratable(vma))
vma              1541 mm/migrate.c   	page = follow_page(vma, addr, follflags);
vma              1722 mm/migrate.c   		struct vm_area_struct *vma;
vma              1726 mm/migrate.c   		vma = find_vma(mm, addr);
vma              1727 mm/migrate.c   		if (!vma || addr < vma->vm_start)
vma              1731 mm/migrate.c   		page = follow_page(vma, addr, FOLL_DUMP);
vma              1969 mm/migrate.c   int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
vma              1982 mm/migrate.c   	    (vma->vm_flags & VM_EXEC))
vma              2025 mm/migrate.c   				struct vm_area_struct *vma,
vma              2059 mm/migrate.c   	flush_cache_range(vma, start, start + HPAGE_PMD_SIZE);
vma              2086 mm/migrate.c   	entry = mk_huge_pmd(new_page, vma->vm_page_prot);
vma              2087 mm/migrate.c   	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
vma              2097 mm/migrate.c   	page_add_anon_rmap(new_page, vma, start, true);
vma              2110 mm/migrate.c   	update_mmu_cache_pmd(vma, address, &entry);
vma              2140 mm/migrate.c   		entry = pmd_modify(entry, vma->vm_page_prot);
vma              2142 mm/migrate.c   		update_mmu_cache_pmd(vma, address, &entry);
vma              2194 mm/migrate.c   	struct vm_area_struct *vma = walk->vma;
vma              2195 mm/migrate.c   	struct mm_struct *mm = vma->vm_mm;
vma              2216 mm/migrate.c   			split_huge_pmd(vma, pmdp, addr);
vma              2284 mm/migrate.c   			page = vm_normal_page(migrate->vma, addr, pte);
vma              2347 mm/migrate.c   		flush_tlb_range(walk->vma, start, end);
vma              2370 mm/migrate.c   			migrate->vma->vm_mm, migrate->start, migrate->end);
vma              2373 mm/migrate.c   	walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
vma              2532 mm/migrate.c   		remove_migration_pte(page, migrate->vma, addr, page);
vma              2669 mm/migrate.c   	if (!args->vma || is_vm_hugetlb_page(args->vma) ||
vma              2670 mm/migrate.c   	    (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
vma              2674 mm/migrate.c   	if (args->start < args->vma->vm_start ||
vma              2675 mm/migrate.c   	    args->start >= args->vma->vm_end)
vma              2677 mm/migrate.c   	if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
vma              2709 mm/migrate.c   	struct vm_area_struct *vma = migrate->vma;
vma              2710 mm/migrate.c   	struct mm_struct *mm = vma->vm_mm;
vma              2722 mm/migrate.c   	if (!vma_is_anonymous(vma))
vma              2756 mm/migrate.c   	if (unlikely(anon_vma_prepare(vma)))
vma              2758 mm/migrate.c   	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
vma              2772 mm/migrate.c   			swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
vma              2776 mm/migrate.c   		entry = mk_pte(page, vma->vm_page_prot);
vma              2777 mm/migrate.c   		if (vma->vm_flags & VM_WRITE)
vma              2802 mm/migrate.c   	if (userfaultfd_missing(vma)) {
vma              2809 mm/migrate.c   	page_add_new_anon_rmap(page, vma, addr, false);
vma              2812 mm/migrate.c   		lru_cache_add_active_or_unevictable(page, vma);
vma              2816 mm/migrate.c   		flush_cache_page(vma, addr, pte_pfn(*ptep));
vma              2817 mm/migrate.c   		ptep_clear_flush_notify(vma, addr, ptep);
vma              2819 mm/migrate.c   		update_mmu_cache(vma, addr, ptep);
vma              2823 mm/migrate.c   		update_mmu_cache(vma, addr, ptep);
vma              2871 mm/migrate.c   							migrate->vma->vm_mm,
vma                96 mm/mincore.c   				struct vm_area_struct *vma, unsigned char *vec)
vma               101 mm/mincore.c   	if (vma->vm_file) {
vma               104 mm/mincore.c   		pgoff = linear_page_index(vma, addr);
vma               106 mm/mincore.c   			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
vma               118 mm/mincore.c   						  walk->vma, walk->private);
vma               126 mm/mincore.c   	struct vm_area_struct *vma = walk->vma;
vma               131 mm/mincore.c   	ptl = pmd_trans_huge_lock(pmd, vma);
vma               139 mm/mincore.c   		__mincore_unmapped_range(addr, end, vma, vec);
vma               149 mm/mincore.c   						 vma, vec);
vma               180 mm/mincore.c   static inline bool can_do_mincore(struct vm_area_struct *vma)
vma               182 mm/mincore.c   	if (vma_is_anonymous(vma))
vma               184 mm/mincore.c   	if (!vma->vm_file)
vma               192 mm/mincore.c   	return inode_owner_or_capable(file_inode(vma->vm_file)) ||
vma               193 mm/mincore.c   		inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0;
vma               209 mm/mincore.c   	struct vm_area_struct *vma;
vma               213 mm/mincore.c   	vma = find_vma(current->mm, addr);
vma               214 mm/mincore.c   	if (!vma || addr < vma->vm_start)
vma               216 mm/mincore.c   	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
vma               217 mm/mincore.c   	if (!can_do_mincore(vma)) {
vma               222 mm/mincore.c   	err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec);
vma               375 mm/mlock.c     			struct vm_area_struct *vma, struct zone *zone,
vma               386 mm/mlock.c     	pte = get_locked_pte(vma->vm_mm, start,	&ptl);
vma               399 mm/mlock.c     			page = vm_normal_page(vma, start, *pte);
vma               445 mm/mlock.c     void munlock_vma_pages_range(struct vm_area_struct *vma,
vma               448 mm/mlock.c     	vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
vma               465 mm/mlock.c     		page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);
vma               497 mm/mlock.c     				start = __munlock_pagevec_fill(&pvec, vma,
vma               519 mm/mlock.c     static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
vma               522 mm/mlock.c     	struct mm_struct *mm = vma->vm_mm;
vma               527 mm/mlock.c     	vm_flags_t old_flags = vma->vm_flags;
vma               529 mm/mlock.c     	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
vma               530 mm/mlock.c     	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
vma               531 mm/mlock.c     	    vma_is_dax(vma))
vma               535 mm/mlock.c     	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
vma               536 mm/mlock.c     	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
vma               537 mm/mlock.c     			  vma->vm_file, pgoff, vma_policy(vma),
vma               538 mm/mlock.c     			  vma->vm_userfaultfd_ctx);
vma               540 mm/mlock.c     		vma = *prev;
vma               544 mm/mlock.c     	if (start != vma->vm_start) {
vma               545 mm/mlock.c     		ret = split_vma(mm, vma, start, 1);
vma               550 mm/mlock.c     	if (end != vma->vm_end) {
vma               551 mm/mlock.c     		ret = split_vma(mm, vma, end, 0);
vma               574 mm/mlock.c     		vma->vm_flags = newflags;
vma               576 mm/mlock.c     		munlock_vma_pages_range(vma, start, end);
vma               579 mm/mlock.c     	*prev = vma;
vma               587 mm/mlock.c     	struct vm_area_struct * vma, * prev;
vma               597 mm/mlock.c     	vma = find_vma(current->mm, start);
vma               598 mm/mlock.c     	if (!vma || vma->vm_start > start)
vma               601 mm/mlock.c     	prev = vma->vm_prev;
vma               602 mm/mlock.c     	if (start > vma->vm_start)
vma               603 mm/mlock.c     		prev = vma;
vma               606 mm/mlock.c     		vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
vma               611 mm/mlock.c     		tmp = vma->vm_end;
vma               614 mm/mlock.c     		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
vma               623 mm/mlock.c     		vma = prev->vm_next;
vma               624 mm/mlock.c     		if (!vma || vma->vm_start != nstart) {
vma               642 mm/mlock.c     	struct vm_area_struct *vma;
vma               648 mm/mlock.c     	vma = find_vma(mm, start);
vma               649 mm/mlock.c     	if (vma == NULL)
vma               650 mm/mlock.c     		vma = mm->mmap;
vma               652 mm/mlock.c     	for (; vma ; vma = vma->vm_next) {
vma               653 mm/mlock.c     		if (start >= vma->vm_end)
vma               655 mm/mlock.c     		if (start + len <=  vma->vm_start)
vma               657 mm/mlock.c     		if (vma->vm_flags & VM_LOCKED) {
vma               658 mm/mlock.c     			if (start > vma->vm_start)
vma               659 mm/mlock.c     				count -= (start - vma->vm_start);
vma               660 mm/mlock.c     			if (start + len < vma->vm_end) {
vma               661 mm/mlock.c     				count += start + len - vma->vm_start;
vma               664 mm/mlock.c     			count += vma->vm_end - vma->vm_start;
vma               765 mm/mlock.c     	struct vm_area_struct * vma, * prev = NULL;
vma               785 mm/mlock.c     	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
vma               788 mm/mlock.c     		newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
vma               792 mm/mlock.c     		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
vma                77 mm/mmap.c      		struct vm_area_struct *vma, struct vm_area_struct *prev,
vma               122 mm/mmap.c      void vma_set_page_prot(struct vm_area_struct *vma)
vma               124 mm/mmap.c      	unsigned long vm_flags = vma->vm_flags;
vma               127 mm/mmap.c      	vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
vma               128 mm/mmap.c      	if (vma_wants_writenotify(vma, vm_page_prot)) {
vma               133 mm/mmap.c      	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
vma               139 mm/mmap.c      static void __remove_shared_vm_struct(struct vm_area_struct *vma,
vma               142 mm/mmap.c      	if (vma->vm_flags & VM_DENYWRITE)
vma               144 mm/mmap.c      	if (vma->vm_flags & VM_SHARED)
vma               148 mm/mmap.c      	vma_interval_tree_remove(vma, &mapping->i_mmap);
vma               156 mm/mmap.c      void unlink_file_vma(struct vm_area_struct *vma)
vma               158 mm/mmap.c      	struct file *file = vma->vm_file;
vma               163 mm/mmap.c      		__remove_shared_vm_struct(vma, file, mapping);
vma               171 mm/mmap.c      static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
vma               173 mm/mmap.c      	struct vm_area_struct *next = vma->vm_next;
vma               176 mm/mmap.c      	if (vma->vm_ops && vma->vm_ops->close)
vma               177 mm/mmap.c      		vma->vm_ops->close(vma);
vma               178 mm/mmap.c      	if (vma->vm_file)
vma               179 mm/mmap.c      		fput(vma->vm_file);
vma               180 mm/mmap.c      	mpol_put(vma_policy(vma));
vma               181 mm/mmap.c      	vm_area_free(vma);
vma               286 mm/mmap.c      static inline unsigned long vma_compute_gap(struct vm_area_struct *vma)
vma               296 mm/mmap.c      	gap = vm_start_gap(vma);
vma               297 mm/mmap.c      	if (vma->vm_prev) {
vma               298 mm/mmap.c      		prev_end = vm_end_gap(vma->vm_prev);
vma               308 mm/mmap.c      static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma)
vma               310 mm/mmap.c      	unsigned long max = vma_compute_gap(vma), subtree_gap;
vma               311 mm/mmap.c      	if (vma->vm_rb.rb_left) {
vma               312 mm/mmap.c      		subtree_gap = rb_entry(vma->vm_rb.rb_left,
vma               317 mm/mmap.c      	if (vma->vm_rb.rb_right) {
vma               318 mm/mmap.c      		subtree_gap = rb_entry(vma->vm_rb.rb_right,
vma               334 mm/mmap.c      		struct vm_area_struct *vma;
vma               335 mm/mmap.c      		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
vma               336 mm/mmap.c      		if (vma->vm_start < prev) {
vma               338 mm/mmap.c      				  vma->vm_start, prev);
vma               341 mm/mmap.c      		if (vma->vm_start < pend) {
vma               343 mm/mmap.c      				  vma->vm_start, pend);
vma               346 mm/mmap.c      		if (vma->vm_start > vma->vm_end) {
vma               348 mm/mmap.c      				  vma->vm_start, vma->vm_end);
vma               352 mm/mmap.c      		if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
vma               354 mm/mmap.c      			       vma->rb_subtree_gap,
vma               355 mm/mmap.c      			       vma_compute_subtree_gap(vma));
vma               361 mm/mmap.c      		prev = vma->vm_start;
vma               362 mm/mmap.c      		pend = vma->vm_end;
vma               379 mm/mmap.c      		struct vm_area_struct *vma;
vma               380 mm/mmap.c      		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
vma               381 mm/mmap.c      		VM_BUG_ON_VMA(vma != ignore &&
vma               382 mm/mmap.c      			vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
vma               383 mm/mmap.c      			vma);
vma               392 mm/mmap.c      	struct vm_area_struct *vma = mm->mmap;
vma               394 mm/mmap.c      	while (vma) {
vma               395 mm/mmap.c      		struct anon_vma *anon_vma = vma->anon_vma;
vma               400 mm/mmap.c      			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
vma               405 mm/mmap.c      		highest_address = vm_end_gap(vma);
vma               406 mm/mmap.c      		vma = vma->vm_next;
vma               440 mm/mmap.c      static void vma_gap_update(struct vm_area_struct *vma)
vma               446 mm/mmap.c      	vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
vma               449 mm/mmap.c      static inline void vma_rb_insert(struct vm_area_struct *vma,
vma               455 mm/mmap.c      	rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
vma               458 mm/mmap.c      static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
vma               465 mm/mmap.c      	rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
vma               468 mm/mmap.c      static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
vma               479 mm/mmap.c      	__vma_rb_erase(vma, root);
vma               482 mm/mmap.c      static __always_inline void vma_rb_erase(struct vm_area_struct *vma,
vma               489 mm/mmap.c      	validate_mm_rb(root, vma);
vma               491 mm/mmap.c      	__vma_rb_erase(vma, root);
vma               509 mm/mmap.c      anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
vma               513 mm/mmap.c      	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
vma               518 mm/mmap.c      anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
vma               522 mm/mmap.c      	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
vma               564 mm/mmap.c      	struct vm_area_struct *vma;
vma               567 mm/mmap.c      	vma = find_vma_intersection(mm, addr, end);
vma               568 mm/mmap.c      	if (!vma)
vma               571 mm/mmap.c      	nr_pages = (min(end, vma->vm_end) -
vma               572 mm/mmap.c      		max(addr, vma->vm_start)) >> PAGE_SHIFT;
vma               575 mm/mmap.c      	for (vma = vma->vm_next; vma; vma = vma->vm_next) {
vma               578 mm/mmap.c      		if (vma->vm_start > end)
vma               581 mm/mmap.c      		overlap_len = min(end, vma->vm_end) - vma->vm_start;
vma               588 mm/mmap.c      void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
vma               592 mm/mmap.c      	if (vma->vm_next)
vma               593 mm/mmap.c      		vma_gap_update(vma->vm_next);
vma               595 mm/mmap.c      		mm->highest_vm_end = vm_end_gap(vma);
vma               606 mm/mmap.c      	rb_link_node(&vma->vm_rb, rb_parent, rb_link);
vma               607 mm/mmap.c      	vma->rb_subtree_gap = 0;
vma               608 mm/mmap.c      	vma_gap_update(vma);
vma               609 mm/mmap.c      	vma_rb_insert(vma, &mm->mm_rb);
vma               612 mm/mmap.c      static void __vma_link_file(struct vm_area_struct *vma)
vma               616 mm/mmap.c      	file = vma->vm_file;
vma               620 mm/mmap.c      		if (vma->vm_flags & VM_DENYWRITE)
vma               622 mm/mmap.c      		if (vma->vm_flags & VM_SHARED)
vma               626 mm/mmap.c      		vma_interval_tree_insert(vma, &mapping->i_mmap);
vma               632 mm/mmap.c      __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
vma               636 mm/mmap.c      	__vma_link_list(mm, vma, prev, rb_parent);
vma               637 mm/mmap.c      	__vma_link_rb(mm, vma, rb_link, rb_parent);
vma               640 mm/mmap.c      static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
vma               646 mm/mmap.c      	if (vma->vm_file) {
vma               647 mm/mmap.c      		mapping = vma->vm_file->f_mapping;
vma               651 mm/mmap.c      	__vma_link(mm, vma, prev, rb_link, rb_parent);
vma               652 mm/mmap.c      	__vma_link_file(vma);
vma               665 mm/mmap.c      static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
vma               670 mm/mmap.c      	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
vma               673 mm/mmap.c      	__vma_link(mm, vma, prev, rb_link, rb_parent);
vma               678 mm/mmap.c      						struct vm_area_struct *vma,
vma               685 mm/mmap.c      	vma_rb_erase_ignore(vma, &mm->mm_rb, ignore);
vma               686 mm/mmap.c      	next = vma->vm_next;
vma               690 mm/mmap.c      		prev = vma->vm_prev;
vma               704 mm/mmap.c      				     struct vm_area_struct *vma,
vma               707 mm/mmap.c      	__vma_unlink_common(mm, vma, prev, true, vma);
vma               717 mm/mmap.c      int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
vma               721 mm/mmap.c      	struct mm_struct *mm = vma->vm_mm;
vma               722 mm/mmap.c      	struct vm_area_struct *next = vma->vm_next, *orig_vma = vma;
vma               726 mm/mmap.c      	struct file *file = vma->vm_file;
vma               754 mm/mmap.c      				swap(vma, next);
vma               756 mm/mmap.c      				VM_WARN_ON(expand != vma);
vma               771 mm/mmap.c      			importer = vma;
vma               787 mm/mmap.c      			importer = vma;
vma               789 mm/mmap.c      		} else if (end < vma->vm_end) {
vma               795 mm/mmap.c      			adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
vma               796 mm/mmap.c      			exporter = vma;
vma               821 mm/mmap.c      		uprobe_munmap(vma, vma->vm_start, vma->vm_end);
vma               838 mm/mmap.c      	anon_vma = vma->anon_vma;
vma               845 mm/mmap.c      		anon_vma_interval_tree_pre_update_vma(vma);
vma               852 mm/mmap.c      		vma_interval_tree_remove(vma, root);
vma               857 mm/mmap.c      	if (start != vma->vm_start) {
vma               858 mm/mmap.c      		vma->vm_start = start;
vma               861 mm/mmap.c      	if (end != vma->vm_end) {
vma               862 mm/mmap.c      		vma->vm_end = end;
vma               865 mm/mmap.c      	vma->vm_pgoff = pgoff;
vma               874 mm/mmap.c      		vma_interval_tree_insert(vma, root);
vma               884 mm/mmap.c      			__vma_unlink_prev(mm, next, vma);
vma               895 mm/mmap.c      			__vma_unlink_common(mm, next, NULL, false, vma);
vma               907 mm/mmap.c      			vma_gap_update(vma);
vma               910 mm/mmap.c      				mm->highest_vm_end = vm_end_gap(vma);
vma               917 mm/mmap.c      		anon_vma_interval_tree_post_update_vma(vma);
vma               926 mm/mmap.c      		uprobe_mmap(vma);
vma               938 mm/mmap.c      			anon_vma_merge(vma, next);
vma               954 mm/mmap.c      			next = vma->vm_next;
vma               966 mm/mmap.c      			next = vma;
vma               995 mm/mmap.c      			VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
vma              1010 mm/mmap.c      static inline int is_mergeable_vma(struct vm_area_struct *vma,
vma              1022 mm/mmap.c      	if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
vma              1024 mm/mmap.c      	if (vma->vm_file != file)
vma              1026 mm/mmap.c      	if (vma->vm_ops && vma->vm_ops->close)
vma              1028 mm/mmap.c      	if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
vma              1035 mm/mmap.c      					struct vm_area_struct *vma)
vma              1041 mm/mmap.c      	if ((!anon_vma1 || !anon_vma2) && (!vma ||
vma              1042 mm/mmap.c      		list_is_singular(&vma->anon_vma_chain)))
vma              1059 mm/mmap.c      can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
vma              1064 mm/mmap.c      	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
vma              1065 mm/mmap.c      	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
vma              1066 mm/mmap.c      		if (vma->vm_pgoff == vm_pgoff)
vma              1080 mm/mmap.c      can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
vma              1085 mm/mmap.c      	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
vma              1086 mm/mmap.c      	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
vma              1088 mm/mmap.c      		vm_pglen = vma_pages(vma);
vma              1089 mm/mmap.c      		if (vma->vm_pgoff + vm_pglen == vm_pgoff)
vma              1291 mm/mmap.c      struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
vma              1296 mm/mmap.c      	near = vma->vm_next;
vma              1300 mm/mmap.c      	anon_vma = reusable_anon_vma(near, vma, near);
vma              1304 mm/mmap.c      	near = vma->vm_prev;
vma              1308 mm/mmap.c      	anon_vma = reusable_anon_vma(near, near, vma);
vma              1441 mm/mmap.c      		struct vm_area_struct *vma = find_vma(mm, addr);
vma              1443 mm/mmap.c      		if (vma && vma->vm_start < addr + len)
vma              1663 mm/mmap.c      int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
vma              1665 mm/mmap.c      	vm_flags_t vm_flags = vma->vm_flags;
vma              1666 mm/mmap.c      	const struct vm_operations_struct *vm_ops = vma->vm_ops;
vma              1691 mm/mmap.c      	return vma->vm_file && vma->vm_file->f_mapping &&
vma              1692 mm/mmap.c      		mapping_cap_account_dirty(vma->vm_file->f_mapping);
vma              1716 mm/mmap.c      	struct vm_area_struct *vma, *prev;
vma              1756 mm/mmap.c      	vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
vma              1758 mm/mmap.c      	if (vma)
vma              1766 mm/mmap.c      	vma = vm_area_alloc(mm);
vma              1767 mm/mmap.c      	if (!vma) {
vma              1772 mm/mmap.c      	vma->vm_start = addr;
vma              1773 mm/mmap.c      	vma->vm_end = addr + len;
vma              1774 mm/mmap.c      	vma->vm_flags = vm_flags;
vma              1775 mm/mmap.c      	vma->vm_page_prot = vm_get_page_prot(vm_flags);
vma              1776 mm/mmap.c      	vma->vm_pgoff = pgoff;
vma              1795 mm/mmap.c      		vma->vm_file = get_file(file);
vma              1796 mm/mmap.c      		error = call_mmap(file, vma);
vma              1807 mm/mmap.c      		WARN_ON_ONCE(addr != vma->vm_start);
vma              1809 mm/mmap.c      		addr = vma->vm_start;
vma              1810 mm/mmap.c      		vm_flags = vma->vm_flags;
vma              1812 mm/mmap.c      		error = shmem_zero_setup(vma);
vma              1816 mm/mmap.c      		vma_set_anonymous(vma);
vma              1819 mm/mmap.c      	vma_link(mm, vma, prev, rb_link, rb_parent);
vma              1827 mm/mmap.c      	file = vma->vm_file;
vma              1829 mm/mmap.c      	perf_event_mmap(vma);
vma              1833 mm/mmap.c      		if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
vma              1834 mm/mmap.c      					is_vm_hugetlb_page(vma) ||
vma              1835 mm/mmap.c      					vma == get_gate_vma(current->mm))
vma              1836 mm/mmap.c      			vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
vma              1842 mm/mmap.c      		uprobe_mmap(vma);
vma              1851 mm/mmap.c      	vma->vm_flags |= VM_SOFTDIRTY;
vma              1853 mm/mmap.c      	vma_set_page_prot(vma);
vma              1858 mm/mmap.c      	vma->vm_file = NULL;
vma              1862 mm/mmap.c      	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
vma              1870 mm/mmap.c      	vm_area_free(vma);
vma              1888 mm/mmap.c      	struct vm_area_struct *vma;
vma              1908 mm/mmap.c      	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
vma              1909 mm/mmap.c      	if (vma->rb_subtree_gap < length)
vma              1914 mm/mmap.c      		gap_end = vm_start_gap(vma);
vma              1915 mm/mmap.c      		if (gap_end >= low_limit && vma->vm_rb.rb_left) {
vma              1917 mm/mmap.c      				rb_entry(vma->vm_rb.rb_left,
vma              1920 mm/mmap.c      				vma = left;
vma              1925 mm/mmap.c      		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
vma              1935 mm/mmap.c      		if (vma->vm_rb.rb_right) {
vma              1937 mm/mmap.c      				rb_entry(vma->vm_rb.rb_right,
vma              1940 mm/mmap.c      				vma = right;
vma              1947 mm/mmap.c      			struct rb_node *prev = &vma->vm_rb;
vma              1950 mm/mmap.c      			vma = rb_entry(rb_parent(prev),
vma              1952 mm/mmap.c      			if (prev == vma->vm_rb.rb_left) {
vma              1953 mm/mmap.c      				gap_start = vm_end_gap(vma->vm_prev);
vma              1954 mm/mmap.c      				gap_end = vm_start_gap(vma);
vma              1983 mm/mmap.c      	struct vm_area_struct *vma;
vma              2012 mm/mmap.c      	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
vma              2013 mm/mmap.c      	if (vma->rb_subtree_gap < length)
vma              2018 mm/mmap.c      		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
vma              2019 mm/mmap.c      		if (gap_start <= high_limit && vma->vm_rb.rb_right) {
vma              2021 mm/mmap.c      				rb_entry(vma->vm_rb.rb_right,
vma              2024 mm/mmap.c      				vma = right;
vma              2031 mm/mmap.c      		gap_end = vm_start_gap(vma);
vma              2039 mm/mmap.c      		if (vma->vm_rb.rb_left) {
vma              2041 mm/mmap.c      				rb_entry(vma->vm_rb.rb_left,
vma              2044 mm/mmap.c      				vma = left;
vma              2051 mm/mmap.c      			struct rb_node *prev = &vma->vm_rb;
vma              2054 mm/mmap.c      			vma = rb_entry(rb_parent(prev),
vma              2056 mm/mmap.c      			if (prev == vma->vm_rb.rb_right) {
vma              2057 mm/mmap.c      				gap_start = vma->vm_prev ?
vma              2058 mm/mmap.c      					vm_end_gap(vma->vm_prev) : 0;
vma              2105 mm/mmap.c      	struct vm_area_struct *vma, *prev;
vma              2117 mm/mmap.c      		vma = find_vma_prev(mm, addr, &prev);
vma              2119 mm/mmap.c      		    (!vma || addr + len <= vm_start_gap(vma)) &&
vma              2143 mm/mmap.c      	struct vm_area_struct *vma, *prev;
vma              2158 mm/mmap.c      		vma = find_vma_prev(mm, addr, &prev);
vma              2160 mm/mmap.c      				(!vma || addr + len <= vm_start_gap(vma)) &&
vma              2238 mm/mmap.c      	struct vm_area_struct *vma;
vma              2241 mm/mmap.c      	vma = vmacache_find(mm, addr);
vma              2242 mm/mmap.c      	if (likely(vma))
vma              2243 mm/mmap.c      		return vma;
vma              2253 mm/mmap.c      			vma = tmp;
vma              2261 mm/mmap.c      	if (vma)
vma              2262 mm/mmap.c      		vmacache_update(addr, vma);
vma              2263 mm/mmap.c      	return vma;
vma              2275 mm/mmap.c      	struct vm_area_struct *vma;
vma              2277 mm/mmap.c      	vma = find_vma(mm, addr);
vma              2278 mm/mmap.c      	if (vma) {
vma              2279 mm/mmap.c      		*pprev = vma->vm_prev;
vma              2285 mm/mmap.c      	return vma;
vma              2293 mm/mmap.c      static int acct_stack_growth(struct vm_area_struct *vma,
vma              2296 mm/mmap.c      	struct mm_struct *mm = vma->vm_mm;
vma              2300 mm/mmap.c      	if (!may_expand_vm(mm, vma->vm_flags, grow))
vma              2308 mm/mmap.c      	if (vma->vm_flags & VM_LOCKED) {
vma              2319 mm/mmap.c      	new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
vma              2320 mm/mmap.c      			vma->vm_end - size;
vma              2321 mm/mmap.c      	if (is_hugepage_only_range(vma->vm_mm, new_start, size))
vma              2339 mm/mmap.c      int expand_upwards(struct vm_area_struct *vma, unsigned long address)
vma              2341 mm/mmap.c      	struct mm_struct *mm = vma->vm_mm;
vma              2346 mm/mmap.c      	if (!(vma->vm_flags & VM_GROWSUP))
vma              2362 mm/mmap.c      	next = vma->vm_next;
vma              2371 mm/mmap.c      	if (unlikely(anon_vma_prepare(vma)))
vma              2379 mm/mmap.c      	anon_vma_lock_write(vma->anon_vma);
vma              2382 mm/mmap.c      	if (address > vma->vm_end) {
vma              2385 mm/mmap.c      		size = address - vma->vm_start;
vma              2386 mm/mmap.c      		grow = (address - vma->vm_end) >> PAGE_SHIFT;
vma              2389 mm/mmap.c      		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
vma              2390 mm/mmap.c      			error = acct_stack_growth(vma, size, grow);
vma              2404 mm/mmap.c      				if (vma->vm_flags & VM_LOCKED)
vma              2406 mm/mmap.c      				vm_stat_account(mm, vma->vm_flags, grow);
vma              2407 mm/mmap.c      				anon_vma_interval_tree_pre_update_vma(vma);
vma              2408 mm/mmap.c      				vma->vm_end = address;
vma              2409 mm/mmap.c      				anon_vma_interval_tree_post_update_vma(vma);
vma              2410 mm/mmap.c      				if (vma->vm_next)
vma              2411 mm/mmap.c      					vma_gap_update(vma->vm_next);
vma              2413 mm/mmap.c      					mm->highest_vm_end = vm_end_gap(vma);
vma              2416 mm/mmap.c      				perf_event_mmap(vma);
vma              2420 mm/mmap.c      	anon_vma_unlock_write(vma->anon_vma);
vma              2421 mm/mmap.c      	khugepaged_enter_vma_merge(vma, vma->vm_flags);
vma              2430 mm/mmap.c      int expand_downwards(struct vm_area_struct *vma,
vma              2433 mm/mmap.c      	struct mm_struct *mm = vma->vm_mm;
vma              2442 mm/mmap.c      	prev = vma->vm_prev;
vma              2451 mm/mmap.c      	if (unlikely(anon_vma_prepare(vma)))
vma              2459 mm/mmap.c      	anon_vma_lock_write(vma->anon_vma);
vma              2462 mm/mmap.c      	if (address < vma->vm_start) {
vma              2465 mm/mmap.c      		size = vma->vm_end - address;
vma              2466 mm/mmap.c      		grow = (vma->vm_start - address) >> PAGE_SHIFT;
vma              2469 mm/mmap.c      		if (grow <= vma->vm_pgoff) {
vma              2470 mm/mmap.c      			error = acct_stack_growth(vma, size, grow);
vma              2484 mm/mmap.c      				if (vma->vm_flags & VM_LOCKED)
vma              2486 mm/mmap.c      				vm_stat_account(mm, vma->vm_flags, grow);
vma              2487 mm/mmap.c      				anon_vma_interval_tree_pre_update_vma(vma);
vma              2488 mm/mmap.c      				vma->vm_start = address;
vma              2489 mm/mmap.c      				vma->vm_pgoff -= grow;
vma              2490 mm/mmap.c      				anon_vma_interval_tree_post_update_vma(vma);
vma              2491 mm/mmap.c      				vma_gap_update(vma);
vma              2494 mm/mmap.c      				perf_event_mmap(vma);
vma              2498 mm/mmap.c      	anon_vma_unlock_write(vma->anon_vma);
vma              2499 mm/mmap.c      	khugepaged_enter_vma_merge(vma, vma->vm_flags);
vma              2521 mm/mmap.c      int expand_stack(struct vm_area_struct *vma, unsigned long address)
vma              2523 mm/mmap.c      	return expand_upwards(vma, address);
vma              2529 mm/mmap.c      	struct vm_area_struct *vma, *prev;
vma              2532 mm/mmap.c      	vma = find_vma_prev(mm, addr, &prev);
vma              2533 mm/mmap.c      	if (vma && (vma->vm_start <= addr))
vma              2534 mm/mmap.c      		return vma;
vma              2543 mm/mmap.c      int expand_stack(struct vm_area_struct *vma, unsigned long address)
vma              2545 mm/mmap.c      	return expand_downwards(vma, address);
vma              2551 mm/mmap.c      	struct vm_area_struct *vma;
vma              2555 mm/mmap.c      	vma = find_vma(mm, addr);
vma              2556 mm/mmap.c      	if (!vma)
vma              2558 mm/mmap.c      	if (vma->vm_start <= addr)
vma              2559 mm/mmap.c      		return vma;
vma              2560 mm/mmap.c      	if (!(vma->vm_flags & VM_GROWSDOWN))
vma              2565 mm/mmap.c      	start = vma->vm_start;
vma              2566 mm/mmap.c      	if (expand_stack(vma, addr))
vma              2568 mm/mmap.c      	if (vma->vm_flags & VM_LOCKED)
vma              2569 mm/mmap.c      		populate_vma_page_range(vma, addr, start, NULL);
vma              2570 mm/mmap.c      	return vma;
vma              2582 mm/mmap.c      static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
vma              2589 mm/mmap.c      		long nrpages = vma_pages(vma);
vma              2591 mm/mmap.c      		if (vma->vm_flags & VM_ACCOUNT)
vma              2593 mm/mmap.c      		vm_stat_account(mm, vma->vm_flags, -nrpages);
vma              2594 mm/mmap.c      		vma = remove_vma(vma);
vma              2595 mm/mmap.c      	} while (vma);
vma              2606 mm/mmap.c      		struct vm_area_struct *vma, struct vm_area_struct *prev,
vma              2615 mm/mmap.c      	unmap_vmas(&tlb, vma, start, end);
vma              2616 mm/mmap.c      	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
vma              2626 mm/mmap.c      detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
vma              2633 mm/mmap.c      	vma->vm_prev = NULL;
vma              2635 mm/mmap.c      		vma_rb_erase(vma, &mm->mm_rb);
vma              2637 mm/mmap.c      		tail_vma = vma;
vma              2638 mm/mmap.c      		vma = vma->vm_next;
vma              2639 mm/mmap.c      	} while (vma && vma->vm_start < end);
vma              2640 mm/mmap.c      	*insertion_point = vma;
vma              2641 mm/mmap.c      	if (vma) {
vma              2642 mm/mmap.c      		vma->vm_prev = prev;
vma              2643 mm/mmap.c      		vma_gap_update(vma);
vma              2656 mm/mmap.c      int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
vma              2662 mm/mmap.c      	if (vma->vm_ops && vma->vm_ops->split) {
vma              2663 mm/mmap.c      		err = vma->vm_ops->split(vma, addr);
vma              2668 mm/mmap.c      	new = vm_area_dup(vma);
vma              2676 mm/mmap.c      		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
vma              2679 mm/mmap.c      	err = vma_dup_policy(vma, new);
vma              2683 mm/mmap.c      	err = anon_vma_clone(new, vma);
vma              2694 mm/mmap.c      		err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
vma              2697 mm/mmap.c      		err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
vma              2720 mm/mmap.c      int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
vma              2726 mm/mmap.c      	return __split_vma(mm, vma, addr, new_below);
vma              2738 mm/mmap.c      	struct vm_area_struct *vma, *prev, *last;
vma              2756 mm/mmap.c      	vma = find_vma(mm, start);
vma              2757 mm/mmap.c      	if (!vma)
vma              2759 mm/mmap.c      	prev = vma->vm_prev;
vma              2763 mm/mmap.c      	if (vma->vm_start >= end)
vma              2773 mm/mmap.c      	if (start > vma->vm_start) {
vma              2781 mm/mmap.c      		if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
vma              2784 mm/mmap.c      		error = __split_vma(mm, vma, start, 0);
vma              2787 mm/mmap.c      		prev = vma;
vma              2797 mm/mmap.c      	vma = prev ? prev->vm_next : mm->mmap;
vma              2809 mm/mmap.c      		int error = userfaultfd_unmap_prep(vma, start, end, uf);
vma              2818 mm/mmap.c      		struct vm_area_struct *tmp = vma;
vma              2830 mm/mmap.c      	detach_vmas_to_be_unmapped(mm, vma, prev, end);
vma              2835 mm/mmap.c      	unmap_region(mm, vma, prev, start, end);
vma              2838 mm/mmap.c      	remove_vma_list(mm, vma);
vma              2896 mm/mmap.c      	struct vm_area_struct *vma;
vma              2919 mm/mmap.c      	vma = find_vma(mm, start);
vma              2921 mm/mmap.c      	if (!vma || !(vma->vm_flags & VM_SHARED))
vma              2924 mm/mmap.c      	if (start < vma->vm_start)
vma              2927 mm/mmap.c      	if (start + size > vma->vm_end) {
vma              2930 mm/mmap.c      		for (next = vma->vm_next; next; next = next->vm_next) {
vma              2935 mm/mmap.c      			if (next->vm_file != vma->vm_file)
vma              2938 mm/mmap.c      			if (next->vm_flags != vma->vm_flags)
vma              2949 mm/mmap.c      	prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
vma              2950 mm/mmap.c      	prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
vma              2951 mm/mmap.c      	prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
vma              2955 mm/mmap.c      	if (vma->vm_flags & VM_LOCKED) {
vma              2960 mm/mmap.c      		for (tmp = vma; tmp->vm_start >= start + size;
vma              2974 mm/mmap.c      	file = get_file(vma->vm_file);
vma              2975 mm/mmap.c      	ret = do_mmap_pgoff(vma->vm_file, start, size,
vma              2995 mm/mmap.c      	struct vm_area_struct *vma, *prev;
vma              3033 mm/mmap.c      	vma = vma_merge(mm, prev, addr, addr + len, flags,
vma              3035 mm/mmap.c      	if (vma)
vma              3041 mm/mmap.c      	vma = vm_area_alloc(mm);
vma              3042 mm/mmap.c      	if (!vma) {
vma              3047 mm/mmap.c      	vma_set_anonymous(vma);
vma              3048 mm/mmap.c      	vma->vm_start = addr;
vma              3049 mm/mmap.c      	vma->vm_end = addr + len;
vma              3050 mm/mmap.c      	vma->vm_pgoff = pgoff;
vma              3051 mm/mmap.c      	vma->vm_flags = flags;
vma              3052 mm/mmap.c      	vma->vm_page_prot = vm_get_page_prot(flags);
vma              3053 mm/mmap.c      	vma_link(mm, vma, prev, rb_link, rb_parent);
vma              3055 mm/mmap.c      	perf_event_mmap(vma);
vma              3060 mm/mmap.c      	vma->vm_flags |= VM_SOFTDIRTY;
vma              3101 mm/mmap.c      	struct vm_area_struct *vma;
vma              3132 mm/mmap.c      		vma = mm->mmap;
vma              3133 mm/mmap.c      		while (vma) {
vma              3134 mm/mmap.c      			if (vma->vm_flags & VM_LOCKED)
vma              3135 mm/mmap.c      				munlock_vma_pages_all(vma);
vma              3136 mm/mmap.c      			vma = vma->vm_next;
vma              3142 mm/mmap.c      	vma = mm->mmap;
vma              3143 mm/mmap.c      	if (!vma)	/* Can happen if dup_mmap() received an OOM */
vma              3151 mm/mmap.c      	unmap_vmas(&tlb, vma, 0, -1);
vma              3152 mm/mmap.c      	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
vma              3159 mm/mmap.c      	while (vma) {
vma              3160 mm/mmap.c      		if (vma->vm_flags & VM_ACCOUNT)
vma              3161 mm/mmap.c      			nr_accounted += vma_pages(vma);
vma              3162 mm/mmap.c      		vma = remove_vma(vma);
vma              3171 mm/mmap.c      int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
vma              3176 mm/mmap.c      	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
vma              3179 mm/mmap.c      	if ((vma->vm_flags & VM_ACCOUNT) &&
vma              3180 mm/mmap.c      	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
vma              3195 mm/mmap.c      	if (vma_is_anonymous(vma)) {
vma              3196 mm/mmap.c      		BUG_ON(vma->anon_vma);
vma              3197 mm/mmap.c      		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
vma              3200 mm/mmap.c      	vma_link(mm, vma, prev, rb_link, rb_parent);
vma              3212 mm/mmap.c      	struct vm_area_struct *vma = *vmap;
vma              3213 mm/mmap.c      	unsigned long vma_start = vma->vm_start;
vma              3214 mm/mmap.c      	struct mm_struct *mm = vma->vm_mm;
vma              3223 mm/mmap.c      	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
vma              3230 mm/mmap.c      	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
vma              3231 mm/mmap.c      			    vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
vma              3232 mm/mmap.c      			    vma->vm_userfaultfd_ctx);
vma              3252 mm/mmap.c      			*vmap = vma = new_vma;
vma              3254 mm/mmap.c      		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
vma              3256 mm/mmap.c      		new_vma = vm_area_dup(vma);
vma              3262 mm/mmap.c      		if (vma_dup_policy(vma, new_vma))
vma              3264 mm/mmap.c      		if (anon_vma_clone(new_vma, vma))
vma              3329 mm/mmap.c      static void special_mapping_close(struct vm_area_struct *vma)
vma              3333 mm/mmap.c      static const char *special_mapping_name(struct vm_area_struct *vma)
vma              3335 mm/mmap.c      	return ((struct vm_special_mapping *)vma->vm_private_data)->name;
vma              3365 mm/mmap.c      	struct vm_area_struct *vma = vmf->vma;
vma              3369 mm/mmap.c      	if (vma->vm_ops == &legacy_special_mapping_vmops) {
vma              3370 mm/mmap.c      		pages = vma->vm_private_data;
vma              3372 mm/mmap.c      		struct vm_special_mapping *sm = vma->vm_private_data;
vma              3375 mm/mmap.c      			return sm->fault(sm, vmf->vma, vmf);
vma              3400 mm/mmap.c      	struct vm_area_struct *vma;
vma              3402 mm/mmap.c      	vma = vm_area_alloc(mm);
vma              3403 mm/mmap.c      	if (unlikely(vma == NULL))
vma              3406 mm/mmap.c      	vma->vm_start = addr;
vma              3407 mm/mmap.c      	vma->vm_end = addr + len;
vma              3409 mm/mmap.c      	vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
vma              3410 mm/mmap.c      	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma              3412 mm/mmap.c      	vma->vm_ops = ops;
vma              3413 mm/mmap.c      	vma->vm_private_data = priv;
vma              3415 mm/mmap.c      	ret = insert_vm_struct(mm, vma);
vma              3419 mm/mmap.c      	vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
vma              3421 mm/mmap.c      	perf_event_mmap(vma);
vma              3423 mm/mmap.c      	return vma;
vma              3426 mm/mmap.c      	vm_area_free(vma);
vma              3430 mm/mmap.c      bool vma_is_special_mapping(const struct vm_area_struct *vma,
vma              3433 mm/mmap.c      	return vma->vm_private_data == sm &&
vma              3434 mm/mmap.c      		(vma->vm_ops == &special_mapping_vmops ||
vma              3435 mm/mmap.c      		 vma->vm_ops == &legacy_special_mapping_vmops);
vma              3460 mm/mmap.c      	struct vm_area_struct *vma = __install_special_mapping(
vma              3464 mm/mmap.c      	return PTR_ERR_OR_ZERO(vma);
vma              3549 mm/mmap.c      	struct vm_area_struct *vma;
vma              3556 mm/mmap.c      	for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma              3559 mm/mmap.c      		if (vma->vm_file && vma->vm_file->f_mapping &&
vma              3560 mm/mmap.c      				is_vm_hugetlb_page(vma))
vma              3561 mm/mmap.c      			vm_lock_mapping(mm, vma->vm_file->f_mapping);
vma              3564 mm/mmap.c      	for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma              3567 mm/mmap.c      		if (vma->vm_file && vma->vm_file->f_mapping &&
vma              3568 mm/mmap.c      				!is_vm_hugetlb_page(vma))
vma              3569 mm/mmap.c      			vm_lock_mapping(mm, vma->vm_file->f_mapping);
vma              3572 mm/mmap.c      	for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma              3575 mm/mmap.c      		if (vma->anon_vma)
vma              3576 mm/mmap.c      			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
vma              3629 mm/mmap.c      	struct vm_area_struct *vma;
vma              3635 mm/mmap.c      	for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma              3636 mm/mmap.c      		if (vma->anon_vma)
vma              3637 mm/mmap.c      			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
vma              3639 mm/mmap.c      		if (vma->vm_file && vma->vm_file->f_mapping)
vma              3640 mm/mmap.c      			vm_unlock_mapping(vma->vm_file->f_mapping);
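The mm/mmap.c hits above include __split_vma()/split_vma() and the __do_munmap() path: unmapping only part of a mapping forces the kernel to split the containing VMA. A minimal userspace sketch that triggers that path (illustration only, not kernel code):

/* Punching a hole in a three-page mapping leaves two VMAs, created
 * through the __split_vma() path indexed above. Minimal sketch;
 * error handling reduced to exits. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) { perror("mmap"); exit(1); }

	/* Unmap the middle page: the single VMA must be split in two. */
	if (munmap(p + page, page)) { perror("munmap"); exit(1); }

	printf("left  piece at %p\n", (void *)p);
	printf("right piece at %p\n", (void *)(p + 2 * page));
	/* Inspect /proc/self/maps to see the two remaining VMAs. */
	return 0;
}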
vma               541 mm/mmu_notifier.c 	if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
vma               544 mm/mmu_notifier.c 	return range->vma->vm_flags & VM_READ;
vma                38 mm/mprotect.c  static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
vma                61 mm/mprotect.c  	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
vma                64 mm/mprotect.c  	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
vma                65 mm/mprotect.c  	    atomic_read(&vma->vm_mm->mm_users) == 1)
vma                68 mm/mprotect.c  	flush_tlb_batched_pending(vma->vm_mm);
vma                83 mm/mprotect.c  				page = vm_normal_page(vma, addr, oldpte);
vma                88 mm/mprotect.c  				if (is_cow_mapping(vma->vm_flags) &&
vma               112 mm/mprotect.c  			oldpte = ptep_modify_prot_start(vma, addr, pte);
vma               120 mm/mprotect.c  					 !(vma->vm_flags & VM_SOFTDIRTY))) {
vma               123 mm/mprotect.c  			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
vma               138 mm/mprotect.c  				set_pte_at(vma->vm_mm, addr, pte, newpte);
vma               152 mm/mprotect.c  				set_pte_at(vma->vm_mm, addr, pte, newpte);
vma               189 mm/mprotect.c  static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
vma               223 mm/mprotect.c  				vma, vma->vm_mm, addr, end);
vma               229 mm/mprotect.c  				__split_huge_pmd(vma, pmd, addr, false, NULL);
vma               231 mm/mprotect.c  				int nr_ptes = change_huge_pmd(vma, pmd, addr,
vma               246 mm/mprotect.c  		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
vma               261 mm/mprotect.c  static inline unsigned long change_pud_range(struct vm_area_struct *vma,
vma               274 mm/mprotect.c  		pages += change_pmd_range(vma, pud, addr, next, newprot,
vma               281 mm/mprotect.c  static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
vma               294 mm/mprotect.c  		pages += change_pud_range(vma, p4d, addr, next, newprot,
vma               301 mm/mprotect.c  static unsigned long change_protection_range(struct vm_area_struct *vma,
vma               305 mm/mprotect.c  	struct mm_struct *mm = vma->vm_mm;
vma               313 mm/mprotect.c  	flush_cache_range(vma, addr, end);
vma               319 mm/mprotect.c  		pages += change_p4d_range(vma, pgd, addr, next, newprot,
vma               325 mm/mprotect.c  		flush_tlb_range(vma, start, end);
vma               331 mm/mprotect.c  unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
vma               337 mm/mprotect.c  	if (is_vm_hugetlb_page(vma))
vma               338 mm/mprotect.c  		pages = hugetlb_change_protection(vma, start, end, newprot);
vma               340 mm/mprotect.c  		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
vma               373 mm/mprotect.c  mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
vma               376 mm/mprotect.c  	struct mm_struct *mm = vma->vm_mm;
vma               377 mm/mprotect.c  	unsigned long oldflags = vma->vm_flags;
vma               385 mm/mprotect.c  		*pprev = vma;
vma               395 mm/mprotect.c  	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
vma               428 mm/mprotect.c  	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
vma               430 mm/mprotect.c  			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
vma               431 mm/mprotect.c  			   vma->vm_userfaultfd_ctx);
vma               433 mm/mprotect.c  		vma = *pprev;
vma               434 mm/mprotect.c  		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
vma               438 mm/mprotect.c  	*pprev = vma;
vma               440 mm/mprotect.c  	if (start != vma->vm_start) {
vma               441 mm/mprotect.c  		error = split_vma(mm, vma, start, 1);
vma               446 mm/mprotect.c  	if (end != vma->vm_end) {
vma               447 mm/mprotect.c  		error = split_vma(mm, vma, end, 0);
vma               457 mm/mprotect.c  	vma->vm_flags = newflags;
vma               458 mm/mprotect.c  	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
vma               459 mm/mprotect.c  	vma_set_page_prot(vma);
vma               461 mm/mprotect.c  	change_protection(vma, start, end, vma->vm_page_prot,
vma               470 mm/mprotect.c  		populate_vma_page_range(vma, start, end, NULL);
vma               475 mm/mprotect.c  	perf_event_mmap(vma);
vma               490 mm/mprotect.c  	struct vm_area_struct *vma, *prev;
vma               526 mm/mprotect.c  	vma = find_vma(current->mm, start);
vma               528 mm/mprotect.c  	if (!vma)
vma               530 mm/mprotect.c  	prev = vma->vm_prev;
vma               532 mm/mprotect.c  		if (vma->vm_start >= end)
vma               534 mm/mprotect.c  		start = vma->vm_start;
vma               536 mm/mprotect.c  		if (!(vma->vm_flags & VM_GROWSDOWN))
vma               539 mm/mprotect.c  		if (vma->vm_start > start)
vma               542 mm/mprotect.c  			end = vma->vm_end;
vma               544 mm/mprotect.c  			if (!(vma->vm_flags & VM_GROWSUP))
vma               548 mm/mprotect.c  	if (start > vma->vm_start)
vma               549 mm/mprotect.c  		prev = vma;
vma               559 mm/mprotect.c  		if (rier && (vma->vm_flags & VM_MAYEXEC))
vma               570 mm/mprotect.c  		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
vma               572 mm/mprotect.c  		newflags |= (vma->vm_flags & ~mask_off_old_flags);
vma               580 mm/mprotect.c  		error = security_file_mprotect(vma, reqprot, prot);
vma               584 mm/mprotect.c  		tmp = vma->vm_end;
vma               587 mm/mprotect.c  		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
vma               597 mm/mprotect.c  		vma = prev->vm_next;
vma               598 mm/mprotect.c  		if (!vma || vma->vm_start != nstart) {
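The mprotect_fixup() hits above show split_vma() being applied at unaligned range boundaries before change_protection() rewrites the PTEs. A small runnable userspace sketch that exercises exactly that sequence:

/* Changing protection on a sub-range of a mapping exercises the
 * split_vma() + change_protection() sequence indexed above.
 * Minimal sketch using a 2-page anonymous mapping. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) { perror("mmap"); exit(1); }

	p[0] = 'a';		/* both pages start out writable */
	p[page] = 'b';

	/* Make only the first page read-only: the VMA is split in two. */
	if (mprotect(p, page, PROT_READ)) { perror("mprotect"); exit(1); }

	p[page] = 'c';		/* still fine: second VMA kept PROT_WRITE */
	printf("first byte reads back as '%c'\n", p[0]);
	return 0;
}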
vma                59 mm/mremap.c    static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
vma                84 mm/mremap.c    static void take_rmap_locks(struct vm_area_struct *vma)
vma                86 mm/mremap.c    	if (vma->vm_file)
vma                87 mm/mremap.c    		i_mmap_lock_write(vma->vm_file->f_mapping);
vma                88 mm/mremap.c    	if (vma->anon_vma)
vma                89 mm/mremap.c    		anon_vma_lock_write(vma->anon_vma);
vma                92 mm/mremap.c    static void drop_rmap_locks(struct vm_area_struct *vma)
vma                94 mm/mremap.c    	if (vma->anon_vma)
vma                95 mm/mremap.c    		anon_vma_unlock_write(vma->anon_vma);
vma                96 mm/mremap.c    	if (vma->vm_file)
vma                97 mm/mremap.c    		i_mmap_unlock_write(vma->vm_file->f_mapping);
vma               115 mm/mremap.c    static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
vma               120 mm/mremap.c    	struct mm_struct *mm = vma->vm_mm;
vma               145 mm/mremap.c    		take_rmap_locks(vma);
vma               156 mm/mremap.c    	flush_tlb_batched_pending(vma->vm_mm);
vma               185 mm/mremap.c    		flush_tlb_range(vma, old_end - len, old_end);
vma               191 mm/mremap.c    		drop_rmap_locks(vma);
vma               195 mm/mremap.c    static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
vma               200 mm/mremap.c    	struct mm_struct *mm = vma->vm_mm;
vma               218 mm/mremap.c    	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
vma               231 mm/mremap.c    	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
vma               240 mm/mremap.c    unsigned long move_page_tables(struct vm_area_struct *vma,
vma               250 mm/mremap.c    	flush_cache_range(vma, old_addr, old_end);
vma               252 mm/mremap.c    	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
vma               263 mm/mremap.c    		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
vma               266 mm/mremap.c    		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
vma               274 mm/mremap.c    					take_rmap_locks(vma);
vma               275 mm/mremap.c    				moved = move_huge_pmd(vma, old_addr, new_addr,
vma               278 mm/mremap.c    					drop_rmap_locks(vma);
vma               282 mm/mremap.c    			split_huge_pmd(vma, old_pmd, old_addr);
vma               294 mm/mremap.c    				take_rmap_locks(vma);
vma               295 mm/mremap.c    			moved = move_normal_pmd(vma, old_addr, new_addr,
vma               298 mm/mremap.c    				drop_rmap_locks(vma);
vma               309 mm/mremap.c    		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
vma               318 mm/mremap.c    static unsigned long move_vma(struct vm_area_struct *vma,
vma               324 mm/mremap.c    	struct mm_struct *mm = vma->vm_mm;
vma               326 mm/mremap.c    	unsigned long vm_flags = vma->vm_flags;
vma               349 mm/mremap.c    	err = ksm_madvise(vma, old_addr, old_addr + old_len,
vma               354 mm/mremap.c    	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
vma               355 mm/mremap.c    	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
vma               360 mm/mremap.c    	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
vma               364 mm/mremap.c    	} else if (vma->vm_ops && vma->vm_ops->mremap) {
vma               365 mm/mremap.c    		err = vma->vm_ops->mremap(new_vma);
vma               374 mm/mremap.c    		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
vma               376 mm/mremap.c    		vma = new_vma;
vma               388 mm/mremap.c    		vma->vm_flags &= ~VM_ACCOUNT;
vma               389 mm/mremap.c    		excess = vma->vm_end - vma->vm_start - old_len;
vma               390 mm/mremap.c    		if (old_addr > vma->vm_start &&
vma               391 mm/mremap.c    		    old_addr + old_len < vma->vm_end)
vma               405 mm/mremap.c    	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);
vma               408 mm/mremap.c    	if (unlikely(vma->vm_flags & VM_PFNMAP))
vma               409 mm/mremap.c    		untrack_pfn_moved(vma);
vma               420 mm/mremap.c    		vma->vm_flags |= VM_ACCOUNT;
vma               422 mm/mremap.c    			vma->vm_next->vm_flags |= VM_ACCOUNT;
vma               437 mm/mremap.c    	struct vm_area_struct *vma = find_vma(mm, addr);
vma               440 mm/mremap.c    	if (!vma || vma->vm_start > addr)
vma               451 mm/mremap.c    	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
vma               456 mm/mremap.c    	if (is_vm_hugetlb_page(vma))
vma               460 mm/mremap.c    	if (old_len > vma->vm_end - addr)
vma               464 mm/mremap.c    		return vma;
vma               467 mm/mremap.c    	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
vma               468 mm/mremap.c    	pgoff += vma->vm_pgoff;
vma               472 mm/mremap.c    	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
vma               475 mm/mremap.c    	if (vma->vm_flags & VM_LOCKED) {
vma               484 mm/mremap.c    	if (!may_expand_vm(mm, vma->vm_flags,
vma               488 mm/mremap.c    	if (vma->vm_flags & VM_ACCOUNT) {
vma               495 mm/mremap.c    	return vma;
vma               505 mm/mremap.c    	struct vm_area_struct *vma;
vma               548 mm/mremap.c    	vma = vma_to_resize(addr, old_len, new_len, &charged);
vma               549 mm/mremap.c    	if (IS_ERR(vma)) {
vma               550 mm/mremap.c    		ret = PTR_ERR(vma);
vma               555 mm/mremap.c    	if (vma->vm_flags & VM_MAYSHARE)
vma               558 mm/mremap.c    	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
vma               559 mm/mremap.c    				((addr - vma->vm_start) >> PAGE_SHIFT),
vma               564 mm/mremap.c    	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, uf,
vma               575 mm/mremap.c    static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
vma               577 mm/mremap.c    	unsigned long end = vma->vm_end + delta;
vma               578 mm/mremap.c    	if (end < vma->vm_end) /* overflow */
vma               580 mm/mremap.c    	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
vma               582 mm/mremap.c    	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
vma               600 mm/mremap.c    	struct vm_area_struct *vma;
vma               674 mm/mremap.c    	vma = vma_to_resize(addr, old_len, new_len, &charged);
vma               675 mm/mremap.c    	if (IS_ERR(vma)) {
vma               676 mm/mremap.c    		ret = PTR_ERR(vma);
vma               682 mm/mremap.c    	if (old_len == vma->vm_end - addr) {
vma               684 mm/mremap.c    		if (vma_expandable(vma, new_len - old_len)) {
vma               687 mm/mremap.c    			if (vma_adjust(vma, vma->vm_start, addr + new_len,
vma               688 mm/mremap.c    				       vma->vm_pgoff, NULL)) {
vma               693 mm/mremap.c    			vm_stat_account(mm, vma->vm_flags, pages);
vma               694 mm/mremap.c    			if (vma->vm_flags & VM_LOCKED) {
vma               711 mm/mremap.c    		if (vma->vm_flags & VM_MAYSHARE)
vma               714 mm/mremap.c    		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
vma               715 mm/mremap.c    					vma->vm_pgoff +
vma               716 mm/mremap.c    					((addr - vma->vm_start) >> PAGE_SHIFT),
vma               723 mm/mremap.c    		ret = move_vma(vma, addr, old_len, new_len, new_addr,
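The mm/mremap.c hits above trace the move_vma()/move_page_tables() path taken when a mapping cannot grow in place. A minimal userspace sketch of the syscall that drives it:

/* Growing a mapping with mremap(): if the region cannot expand in
 * place, MREMAP_MAYMOVE lets the kernel relocate it via the
 * move_vma()/move_page_tables() path indexed above. Minimal sketch. */
#define _GNU_SOURCE		/* for mremap() and MREMAP_MAYMOVE */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) { perror("mmap"); exit(1); }
	strcpy(p, "contents survive the move");

	char *q = mremap(p, page, 4 * page, MREMAP_MAYMOVE);
	if (q == MAP_FAILED) { perror("mremap"); exit(1); }

	printf("old %p new %p: %s\n", (void *)p, (void *)q, q);
	return 0;
}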
vma                36 mm/msync.c     	struct vm_area_struct *vma;
vma                61 mm/msync.c     	vma = find_vma(mm, start);
vma                68 mm/msync.c     		if (!vma)
vma                71 mm/msync.c     		if (start < vma->vm_start) {
vma                72 mm/msync.c     			start = vma->vm_start;
vma                79 mm/msync.c     				(vma->vm_flags & VM_LOCKED)) {
vma                83 mm/msync.c     		file = vma->vm_file;
vma                84 mm/msync.c     		fstart = (start - vma->vm_start) +
vma                85 mm/msync.c     			 ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
vma                86 mm/msync.c     		fend = fstart + (min(end, vma->vm_end) - start) - 1;
vma                87 mm/msync.c     		start = vma->vm_end;
vma                89 mm/msync.c     				(vma->vm_flags & VM_SHARED)) {
vma                97 mm/msync.c     			vma = find_vma(mm, start);
vma               103 mm/msync.c     			vma = vma->vm_next;
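The mm/msync.c hits above compute the file range backing each VMA in the sync window. A minimal userspace sketch of the interface from the caller's side, using a temporary file:

/* msync(MS_SYNC) flushes the dirty pages of a shared file mapping
 * through the per-VMA file-range computation indexed above.
 * Minimal sketch; the temp-file path is illustrative. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char path[] = "/tmp/msync-demo-XXXXXX";
	int fd = mkstemp(path);
	if (fd < 0 || ftruncate(fd, page)) { perror("setup"); exit(1); }

	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) { perror("mmap"); exit(1); }

	strcpy(p, "written through the mapping");

	/* Force the dirty page out to the file before anyone reads it. */
	if (msync(p, page, MS_SYNC)) { perror("msync"); exit(1); }

	printf("synced %s\n", path);
	unlink(path);
	return 0;
}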
vma               100 mm/nommu.c     		struct vm_area_struct *vma;
vma               102 mm/nommu.c     		vma = find_vma(current->mm, (unsigned long)objp);
vma               103 mm/nommu.c     		if (vma)
vma               104 mm/nommu.c     			return vma->vm_end - vma->vm_start;
vma               124 mm/nommu.c     int follow_pfn(struct vm_area_struct *vma, unsigned long address,
vma               127 mm/nommu.c     	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
vma               164 mm/nommu.c     		struct vm_area_struct *vma;
vma               167 mm/nommu.c     		vma = find_vma(current->mm, (unsigned long)ret);
vma               168 mm/nommu.c     		if (vma)
vma               169 mm/nommu.c     			vma->vm_flags |= VM_USERMAP;
vma               386 mm/nommu.c     int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
vma               393 mm/nommu.c     int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
vma               400 mm/nommu.c     int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
vma               586 mm/nommu.c     static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
vma               592 mm/nommu.c     	BUG_ON(!vma->vm_region);
vma               595 mm/nommu.c     	vma->vm_mm = mm;
vma               598 mm/nommu.c     	if (vma->vm_file) {
vma               599 mm/nommu.c     		mapping = vma->vm_file->f_mapping;
vma               603 mm/nommu.c     		vma_interval_tree_insert(vma, &mapping->i_mmap);
vma               617 mm/nommu.c     		if (vma->vm_start < pvma->vm_start)
vma               619 mm/nommu.c     		else if (vma->vm_start > pvma->vm_start) {
vma               622 mm/nommu.c     		} else if (vma->vm_end < pvma->vm_end)
vma               624 mm/nommu.c     		else if (vma->vm_end > pvma->vm_end) {
vma               627 mm/nommu.c     		} else if (vma < pvma)
vma               629 mm/nommu.c     		else if (vma > pvma) {
vma               636 mm/nommu.c     	rb_link_node(&vma->vm_rb, parent, p);
vma               637 mm/nommu.c     	rb_insert_color(&vma->vm_rb, &mm->mm_rb);
vma               644 mm/nommu.c     	__vma_link_list(mm, vma, prev, parent);
vma               650 mm/nommu.c     static void delete_vma_from_mm(struct vm_area_struct *vma)
vma               654 mm/nommu.c     	struct mm_struct *mm = vma->vm_mm;
vma               660 mm/nommu.c     		if (curr->vmacache.vmas[i] == vma) {
vma               667 mm/nommu.c     	if (vma->vm_file) {
vma               668 mm/nommu.c     		mapping = vma->vm_file->f_mapping;
vma               672 mm/nommu.c     		vma_interval_tree_remove(vma, &mapping->i_mmap);
vma               678 mm/nommu.c     	rb_erase(&vma->vm_rb, &mm->mm_rb);
vma               680 mm/nommu.c     	if (vma->vm_prev)
vma               681 mm/nommu.c     		vma->vm_prev->vm_next = vma->vm_next;
vma               683 mm/nommu.c     		mm->mmap = vma->vm_next;
vma               685 mm/nommu.c     	if (vma->vm_next)
vma               686 mm/nommu.c     		vma->vm_next->vm_prev = vma->vm_prev;
vma               692 mm/nommu.c     static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
vma               694 mm/nommu.c     	if (vma->vm_ops && vma->vm_ops->close)
vma               695 mm/nommu.c     		vma->vm_ops->close(vma);
vma               696 mm/nommu.c     	if (vma->vm_file)
vma               697 mm/nommu.c     		fput(vma->vm_file);
vma               698 mm/nommu.c     	put_nommu_region(vma->vm_region);
vma               699 mm/nommu.c     	vm_area_free(vma);
vma               708 mm/nommu.c     	struct vm_area_struct *vma;
vma               711 mm/nommu.c     	vma = vmacache_find(mm, addr);
vma               712 mm/nommu.c     	if (likely(vma))
vma               713 mm/nommu.c     		return vma;
vma               717 mm/nommu.c     	for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma               718 mm/nommu.c     		if (vma->vm_start > addr)
vma               720 mm/nommu.c     		if (vma->vm_end > addr) {
vma               721 mm/nommu.c     			vmacache_update(addr, vma);
vma               722 mm/nommu.c     			return vma;
vma               743 mm/nommu.c     int expand_stack(struct vm_area_struct *vma, unsigned long address)
vma               756 mm/nommu.c     	struct vm_area_struct *vma;
vma               760 mm/nommu.c     	vma = vmacache_find_exact(mm, addr, end);
vma               761 mm/nommu.c     	if (vma)
vma               762 mm/nommu.c     		return vma;
vma               766 mm/nommu.c     	for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma               767 mm/nommu.c     		if (vma->vm_start < addr)
vma               769 mm/nommu.c     		if (vma->vm_start > addr)
vma               771 mm/nommu.c     		if (vma->vm_end == end) {
vma               772 mm/nommu.c     			vmacache_update(addr, vma);
vma               773 mm/nommu.c     			return vma;
vma               983 mm/nommu.c     static int do_mmap_shared_file(struct vm_area_struct *vma)
vma               987 mm/nommu.c     	ret = call_mmap(vma->vm_file, vma);
vma               989 mm/nommu.c     		vma->vm_region->vm_top = vma->vm_region->vm_end;
vma              1004 mm/nommu.c     static int do_mmap_private(struct vm_area_struct *vma,
vma              1018 mm/nommu.c     		ret = call_mmap(vma->vm_file, vma);
vma              1021 mm/nommu.c     			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
vma              1022 mm/nommu.c     			vma->vm_region->vm_top = vma->vm_region->vm_end;
vma              1052 mm/nommu.c     	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
vma              1057 mm/nommu.c     	vma->vm_start = region->vm_start;
vma              1058 mm/nommu.c     	vma->vm_end   = region->vm_start + len;
vma              1060 mm/nommu.c     	if (vma->vm_file) {
vma              1064 mm/nommu.c     		fpos = vma->vm_pgoff;
vma              1067 mm/nommu.c     		ret = kernel_read(vma->vm_file, base, len, &fpos);
vma              1076 mm/nommu.c     		vma_set_anonymous(vma);
vma              1083 mm/nommu.c     	region->vm_start = vma->vm_start = 0;
vma              1084 mm/nommu.c     	region->vm_end   = vma->vm_end = 0;
vma              1108 mm/nommu.c     	struct vm_area_struct *vma;
vma              1136 mm/nommu.c     	vma = vm_area_alloc(current->mm);
vma              1137 mm/nommu.c     	if (!vma)
vma              1144 mm/nommu.c     	vma->vm_flags = vm_flags;
vma              1145 mm/nommu.c     	vma->vm_pgoff = pgoff;
vma              1149 mm/nommu.c     		vma->vm_file = get_file(file);
vma              1201 mm/nommu.c     			vma->vm_region = pregion;
vma              1204 mm/nommu.c     			vma->vm_start = start;
vma              1205 mm/nommu.c     			vma->vm_end = start + len;
vma              1208 mm/nommu.c     				vma->vm_flags |= VM_MAPPED_COPY;
vma              1210 mm/nommu.c     				ret = do_mmap_shared_file(vma);
vma              1212 mm/nommu.c     					vma->vm_region = NULL;
vma              1213 mm/nommu.c     					vma->vm_start = 0;
vma              1214 mm/nommu.c     					vma->vm_end = 0;
vma              1248 mm/nommu.c     				vma->vm_start = region->vm_start = addr;
vma              1249 mm/nommu.c     				vma->vm_end = region->vm_end = addr + len;
vma              1254 mm/nommu.c     	vma->vm_region = region;
vma              1259 mm/nommu.c     	if (file && vma->vm_flags & VM_SHARED)
vma              1260 mm/nommu.c     		ret = do_mmap_shared_file(vma);
vma              1262 mm/nommu.c     		ret = do_mmap_private(vma, region, len, capabilities);
vma              1268 mm/nommu.c     	if (!vma->vm_file &&
vma              1275 mm/nommu.c     	result = vma->vm_start;
vma              1280 mm/nommu.c     	add_vma_to_mm(current->mm, vma);
vma              1284 mm/nommu.c     	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
vma              1299 mm/nommu.c     	if (vma->vm_file)
vma              1300 mm/nommu.c     		fput(vma->vm_file);
vma              1301 mm/nommu.c     	vm_area_free(vma);
vma              1383 mm/nommu.c     int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
vma              1392 mm/nommu.c     	if (vma->vm_file)
vma              1402 mm/nommu.c     	new = vm_area_dup(vma);
vma              1409 mm/nommu.c     	*region = *vma->vm_region;
vma              1412 mm/nommu.c     	npages = (addr - vma->vm_start) >> PAGE_SHIFT;
vma              1424 mm/nommu.c     	delete_vma_from_mm(vma);
vma              1426 mm/nommu.c     	delete_nommu_region(vma->vm_region);
vma              1428 mm/nommu.c     		vma->vm_region->vm_start = vma->vm_start = addr;
vma              1429 mm/nommu.c     		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
vma              1431 mm/nommu.c     		vma->vm_region->vm_end = vma->vm_end = addr;
vma              1432 mm/nommu.c     		vma->vm_region->vm_top = addr;
vma              1434 mm/nommu.c     	add_nommu_region(vma->vm_region);
vma              1437 mm/nommu.c     	add_vma_to_mm(mm, vma);
vma              1447 mm/nommu.c     		      struct vm_area_struct *vma,
vma              1454 mm/nommu.c     	delete_vma_from_mm(vma);
vma              1455 mm/nommu.c     	if (from > vma->vm_start)
vma              1456 mm/nommu.c     		vma->vm_end = from;
vma              1458 mm/nommu.c     		vma->vm_start = to;
vma              1459 mm/nommu.c     	add_vma_to_mm(mm, vma);
vma              1462 mm/nommu.c     	region = vma->vm_region;
vma              1487 mm/nommu.c     	struct vm_area_struct *vma;
vma              1498 mm/nommu.c     	vma = find_vma(mm, start);
vma              1499 mm/nommu.c     	if (!vma) {
vma              1511 mm/nommu.c     	if (vma->vm_file) {
vma              1513 mm/nommu.c     			if (start > vma->vm_start)
vma              1515 mm/nommu.c     			if (end == vma->vm_end)
vma              1517 mm/nommu.c     			vma = vma->vm_next;
vma              1518 mm/nommu.c     		} while (vma);
vma              1522 mm/nommu.c     		if (start == vma->vm_start && end == vma->vm_end)
vma              1524 mm/nommu.c     		if (start < vma->vm_start || end > vma->vm_end)
vma              1528 mm/nommu.c     		if (end != vma->vm_end && offset_in_page(end))
vma              1530 mm/nommu.c     		if (start != vma->vm_start && end != vma->vm_end) {
vma              1531 mm/nommu.c     			ret = split_vma(mm, vma, start, 1);
vma              1535 mm/nommu.c     		return shrink_vma(mm, vma, start, end);
vma              1539 mm/nommu.c     	delete_vma_from_mm(vma);
vma              1540 mm/nommu.c     	delete_vma(mm, vma);
vma              1567 mm/nommu.c     	struct vm_area_struct *vma;
vma              1574 mm/nommu.c     	while ((vma = mm->mmap)) {
vma              1575 mm/nommu.c     		mm->mmap = vma->vm_next;
vma              1576 mm/nommu.c     		delete_vma_from_mm(vma);
vma              1577 mm/nommu.c     		delete_vma(mm, vma);
vma              1601 mm/nommu.c     	struct vm_area_struct *vma;
vma              1615 mm/nommu.c     	vma = find_vma_exact(current->mm, addr, old_len);
vma              1616 mm/nommu.c     	if (!vma)
vma              1619 mm/nommu.c     	if (vma->vm_end != vma->vm_start + old_len)
vma              1622 mm/nommu.c     	if (vma->vm_flags & VM_MAYSHARE)
vma              1625 mm/nommu.c     	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
vma              1629 mm/nommu.c     	vma->vm_end = vma->vm_start + new_len;
vma              1630 mm/nommu.c     	return vma->vm_start;
vma              1645 mm/nommu.c     struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
vma              1651 mm/nommu.c     int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
vma              1657 mm/nommu.c     	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma              1662 mm/nommu.c     int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
vma              1665 mm/nommu.c     	unsigned long vm_len = vma->vm_end - vma->vm_start;
vma              1667 mm/nommu.c     	pfn += vma->vm_pgoff;
vma              1668 mm/nommu.c     	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
vma              1672 mm/nommu.c     int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
vma              1675 mm/nommu.c     	unsigned int size = vma->vm_end - vma->vm_start;
vma              1677 mm/nommu.c     	if (!(vma->vm_flags & VM_USERMAP))
vma              1680 mm/nommu.c     	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
vma              1681 mm/nommu.c     	vma->vm_end = vma->vm_start + size;
vma              1710 mm/nommu.c     	struct vm_area_struct *vma;
vma              1717 mm/nommu.c     	vma = find_vma(mm, addr);
vma              1718 mm/nommu.c     	if (vma) {
vma              1720 mm/nommu.c     		if (addr + len >= vma->vm_end)
vma              1721 mm/nommu.c     			len = vma->vm_end - addr;
vma              1724 mm/nommu.c     		if (write && vma->vm_flags & VM_MAYWRITE)
vma              1725 mm/nommu.c     			copy_to_user_page(vma, NULL, addr,
vma              1727 mm/nommu.c     		else if (!write && vma->vm_flags & VM_MAYREAD)
vma              1728 mm/nommu.c     			copy_from_user_page(vma, NULL, addr,
vma              1794 mm/nommu.c     	struct vm_area_struct *vma;
vma              1806 mm/nommu.c     	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
vma              1809 mm/nommu.c     		if (vma->vm_flags & VM_SHARED) {
vma              1822 mm/nommu.c     	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
vma              1823 mm/nommu.c     		if (!(vma->vm_flags & VM_SHARED))
vma              1826 mm/nommu.c     		region = vma->vm_region;
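The add_vma_to_mm() hits above show the nommu rbtree ordering VMAs by three keys in turn: vm_start, then vm_end, then the object's own address as a tie-breaker. A standalone sketch of that comparison chain (the struct here is a hypothetical stand-in, not the kernel's):

/* Standalone illustration of the three-key ordering used by the
 * nommu add_vma_to_mm() insertion indexed above: compare vm_start,
 * then vm_end, then fall back to object identity. */
#include <stdio.h>

struct demo_vma {
	unsigned long vm_start;
	unsigned long vm_end;
};

/* <0: a sorts left of b, >0: right, mirroring the if/else chain. */
static int vma_order(const struct demo_vma *a, const struct demo_vma *b)
{
	if (a->vm_start < b->vm_start)
		return -1;
	if (a->vm_start > b->vm_start)
		return 1;
	if (a->vm_end < b->vm_end)
		return -1;
	if (a->vm_end > b->vm_end)
		return 1;
	/* identical ranges: decide by the objects' own addresses */
	if (a < b)
		return -1;
	return a > b ? 1 : 0;
}

int main(void)
{
	struct demo_vma x = { 0x1000, 0x3000 }, y = { 0x1000, 0x2000 };
	printf("x vs y: %d\n", vma_order(&x, &y));	/* 1: same start, longer end */
	return 0;
}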
vma               514 mm/oom_kill.c  	struct vm_area_struct *vma;
vma               525 mm/oom_kill.c  	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
vma               526 mm/oom_kill.c  		if (!can_madv_lru_vma(vma))
vma               539 mm/oom_kill.c  		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
vma               544 mm/oom_kill.c  						vma, mm, vma->vm_start,
vma               545 mm/oom_kill.c  						vma->vm_end);
vma               552 mm/oom_kill.c  			unmap_page_range(&tlb, vma, range.start, range.end, NULL);
vma                55 mm/page_idle.c 					struct vm_area_struct *vma,
vma                60 mm/page_idle.c 		.vma = vma,
vma                72 mm/page_idle.c 			if (ptep_clear_young_notify(vma, addr, pvmw.pte))
vma                75 mm/page_idle.c 			if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
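The mm/page_idle.c hits above back the idle-page-tracking interface. From userspace the state is exposed at /sys/kernel/mm/page_idle/bitmap: one bit per page frame, 64 frames per 8-byte word, readable and writable in 8-byte units. A minimal sketch (needs root and CONFIG_IDLE_PAGE_TRACKING):

/* Read the idle bits for the first 64 page frames. Word N of the
 * bitmap covers pfns N*64 .. N*64+63. Minimal sketch. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDONLY);
	if (fd < 0) { perror("open"); exit(1); }

	uint64_t word;
	/* Word 0 covers page frames 0..63. */
	if (pread(fd, &word, sizeof(word), 0) != sizeof(word)) {
		perror("pread");
		exit(1);
	}
	printf("idle bits for pfns 0-63: %#llx\n",
	       (unsigned long long)word);
	close(fd);
	return 0;
}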
vma                50 mm/page_vma_mapped.c 	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
vma               140 mm/page_vma_mapped.c 	struct mm_struct *mm = pvmw->vma->vm_mm;
vma               223 mm/page_vma_mapped.c 			if (pvmw->address >= pvmw->vma->vm_end ||
vma               225 mm/page_vma_mapped.c 					__vma_address(pvmw->page, pvmw->vma) +
vma               257 mm/page_vma_mapped.c int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
vma               261 mm/page_vma_mapped.c 		.vma = vma,
vma               266 mm/page_vma_mapped.c 	start = __vma_address(page, vma);
vma               269 mm/page_vma_mapped.c 	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
vma               271 mm/page_vma_mapped.c 	pvmw.address = max(start, vma->vm_start);
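The page_vma_mapped_walk() machinery above is consumed by the rmap callers indexed later (page_referenced_one, page_mkclean_one, try_to_unmap_one) through one shared idiom. A kernel-context sketch of that loop, not a standalone program:

/* Kernel-context sketch (not standalone): page_vma_mapped_walk()
 * yields each place 'page' is mapped inside 'vma', exposing either
 * pvmw.pte or pvmw.pmd plus pvmw.address, with the page-table lock
 * held for the duration of each iteration. */
struct page_vma_mapped_walk pvmw = {
	.page = page,
	.vma = vma,
	.address = address,
};

while (page_vma_mapped_walk(&pvmw)) {
	if (pvmw.pte) {
		/* act on a PTE-mapped subpage at pvmw.address */
	} else {
		/* act on a PMD-mapped THP via pvmw.pmd */
	}
}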
vma                41 mm/pagewalk.c  		if (pmd_none(*pmd) || !walk->vma) {
vma                64 mm/pagewalk.c  		split_huge_pmd(walk->vma, pmd, addr);
vma                87 mm/pagewalk.c  		if (pud_none(*pud) || !walk->vma) {
vma                96 mm/pagewalk.c  			spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);
vma               107 mm/pagewalk.c  		split_huge_pud(walk->vma, pud, addr);
vma               185 mm/pagewalk.c  	struct vm_area_struct *vma = walk->vma;
vma               186 mm/pagewalk.c  	struct hstate *h = hstate_vma(vma);
vma               228 mm/pagewalk.c  	struct vm_area_struct *vma = walk->vma;
vma               242 mm/pagewalk.c  	if (vma->vm_flags & VM_PFNMAP) {
vma               255 mm/pagewalk.c  	struct vm_area_struct *vma = walk->vma;
vma               257 mm/pagewalk.c  	if (vma && is_vm_hugetlb_page(vma)) {
vma               307 mm/pagewalk.c  	struct vm_area_struct *vma;
vma               322 mm/pagewalk.c  	vma = find_vma(walk.mm, start);
vma               324 mm/pagewalk.c  		if (!vma) { /* after the last vma */
vma               325 mm/pagewalk.c  			walk.vma = NULL;
vma               327 mm/pagewalk.c  		} else if (start < vma->vm_start) { /* outside vma */
vma               328 mm/pagewalk.c  			walk.vma = NULL;
vma               329 mm/pagewalk.c  			next = min(end, vma->vm_start);
vma               331 mm/pagewalk.c  			walk.vma = vma;
vma               332 mm/pagewalk.c  			next = min(end, vma->vm_end);
vma               333 mm/pagewalk.c  			vma = vma->vm_next;
vma               348 mm/pagewalk.c  		if (walk.vma || walk.ops->pte_hole)
vma               356 mm/pagewalk.c  int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
vma               361 mm/pagewalk.c  		.mm		= vma->vm_mm,
vma               362 mm/pagewalk.c  		.vma		= vma,
vma               372 mm/pagewalk.c  	err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
vma               377 mm/pagewalk.c  	return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
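The walk_page_vma()/walk_page_range() hits above take a const struct mm_walk_ops: callbacks are optional, and levels without one are simply descended. A kernel-context sketch of a caller (not standalone; 'count_pte' and the counter are hypothetical names):

/* Kernel-context sketch: count present PTEs in a VMA through the
 * mm_walk_ops interface indexed above. */
static int count_pte(pte_t *pte, unsigned long addr,
		     unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;	/* non-zero would abort the walk */
}

static const struct mm_walk_ops count_ops = {
	.pte_entry = count_pte,
};

/* then, with mmap_sem held for read:
 *	unsigned long present = 0;
 *	walk_page_vma(vma, &count_ops, &present);
 */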
vma                55 mm/pgtable-generic.c int ptep_set_access_flags(struct vm_area_struct *vma,
vma                61 mm/pgtable-generic.c 		set_pte_at(vma->vm_mm, address, ptep, entry);
vma                62 mm/pgtable-generic.c 		flush_tlb_fix_spurious_fault(vma, address);
vma                69 mm/pgtable-generic.c int ptep_clear_flush_young(struct vm_area_struct *vma,
vma                73 mm/pgtable-generic.c 	young = ptep_test_and_clear_young(vma, address, ptep);
vma                75 mm/pgtable-generic.c 		flush_tlb_page(vma, address);
vma                81 mm/pgtable-generic.c pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
vma                84 mm/pgtable-generic.c 	struct mm_struct *mm = (vma)->vm_mm;
vma                88 mm/pgtable-generic.c 		flush_tlb_page(vma, address);
vma                96 mm/pgtable-generic.c int pmdp_set_access_flags(struct vm_area_struct *vma,
vma               103 mm/pgtable-generic.c 		set_pmd_at(vma->vm_mm, address, pmdp, entry);
vma               104 mm/pgtable-generic.c 		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
vma               111 mm/pgtable-generic.c int pmdp_clear_flush_young(struct vm_area_struct *vma,
vma               116 mm/pgtable-generic.c 	young = pmdp_test_and_clear_young(vma, address, pmdp);
vma               118 mm/pgtable-generic.c 		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
vma               124 mm/pgtable-generic.c pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
vma               131 mm/pgtable-generic.c 	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
vma               132 mm/pgtable-generic.c 	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
vma               137 mm/pgtable-generic.c pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
vma               144 mm/pgtable-generic.c 	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
vma               145 mm/pgtable-generic.c 	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
vma               185 mm/pgtable-generic.c pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
vma               188 mm/pgtable-generic.c 	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mknotpresent(*pmdp));
vma               189 mm/pgtable-generic.c 	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
vma               195 mm/pgtable-generic.c pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
vma               206 mm/pgtable-generic.c 	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
vma               209 mm/pgtable-generic.c 	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
vma               138 mm/rmap.c      static void anon_vma_chain_link(struct vm_area_struct *vma,
vma               142 mm/rmap.c      	avc->vma = vma;
vma               144 mm/rmap.c      	list_add(&avc->same_vma, &vma->anon_vma_chain);
vma               176 mm/rmap.c      int __anon_vma_prepare(struct vm_area_struct *vma)
vma               178 mm/rmap.c      	struct mm_struct *mm = vma->vm_mm;
vma               188 mm/rmap.c      	anon_vma = find_mergeable_anon_vma(vma);
vma               200 mm/rmap.c      	if (likely(!vma->anon_vma)) {
vma               201 mm/rmap.c      		vma->anon_vma = anon_vma;
vma               202 mm/rmap.c      		anon_vma_chain_link(vma, avc, anon_vma);
vma               316 mm/rmap.c      int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
vma               327 mm/rmap.c      	vma->anon_vma = NULL;
vma               333 mm/rmap.c      	error = anon_vma_clone(vma, pvma);
vma               338 mm/rmap.c      	if (vma->anon_vma)
vma               362 mm/rmap.c      	vma->anon_vma = anon_vma;
vma               364 mm/rmap.c      	anon_vma_chain_link(vma, avc, anon_vma);
vma               373 mm/rmap.c      	unlink_anon_vmas(vma);
vma               377 mm/rmap.c      void unlink_anon_vmas(struct vm_area_struct *vma)
vma               386 mm/rmap.c      	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
vma               404 mm/rmap.c      	if (vma->anon_vma)
vma               405 mm/rmap.c      		vma->anon_vma->degree--;
vma               413 mm/rmap.c      	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
vma               688 mm/rmap.c      unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
vma               697 mm/rmap.c      		if (!vma->anon_vma || !page__anon_vma ||
vma               698 mm/rmap.c      		    vma->anon_vma->root != page__anon_vma->root)
vma               701 mm/rmap.c      		if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
vma               705 mm/rmap.c      	address = __vma_address(page, vma);
vma               706 mm/rmap.c      	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
vma               754 mm/rmap.c      static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
vma               760 mm/rmap.c      		.vma = vma,
vma               768 mm/rmap.c      		if (vma->vm_flags & VM_LOCKED) {
vma               775 mm/rmap.c      			if (ptep_clear_flush_young_notify(vma, address,
vma               785 mm/rmap.c      				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
vma               789 mm/rmap.c      			if (pmdp_clear_flush_young_notify(vma, address,
vma               807 mm/rmap.c      		pra->vm_flags |= vma->vm_flags;
vma               816 mm/rmap.c      static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
vma               821 mm/rmap.c      	if (!mm_match_cgroup(vma->vm_mm, memcg))
vma               884 mm/rmap.c      static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
vma               889 mm/rmap.c      		.vma = vma,
vma               901 mm/rmap.c      				0, vma, vma->vm_mm, address,
vma               902 mm/rmap.c      				min(vma->vm_end, address + page_size(page)));
vma               916 mm/rmap.c      			flush_cache_page(vma, address, pte_pfn(*pte));
vma               917 mm/rmap.c      			entry = ptep_clear_flush(vma, address, pte);
vma               920 mm/rmap.c      			set_pte_at(vma->vm_mm, address, pte, entry);
vma               930 mm/rmap.c      			flush_cache_page(vma, address, page_to_pfn(page));
vma               931 mm/rmap.c      			entry = pmdp_invalidate(vma, address, pmd);
vma               934 mm/rmap.c      			set_pmd_at(vma->vm_mm, address, pmd, entry);
vma               958 mm/rmap.c      static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
vma               960 mm/rmap.c      	if (vma->vm_flags & VM_SHARED)
vma              1001 mm/rmap.c      void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
vma              1003 mm/rmap.c      	struct anon_vma *anon_vma = vma->anon_vma;
vma              1008 mm/rmap.c      	VM_BUG_ON_VMA(!anon_vma, vma);
vma              1027 mm/rmap.c      	struct vm_area_struct *vma, unsigned long address, int exclusive)
vma              1029 mm/rmap.c      	struct anon_vma *anon_vma = vma->anon_vma;
vma              1046 mm/rmap.c      	page->index = linear_page_index(vma, address);
vma              1056 mm/rmap.c      	struct vm_area_struct *vma, unsigned long address)
vma              1071 mm/rmap.c      	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
vma              1072 mm/rmap.c      	BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address));
vma              1089 mm/rmap.c      	struct vm_area_struct *vma, unsigned long address, bool compound)
vma              1091 mm/rmap.c      	do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
vma              1100 mm/rmap.c      	struct vm_area_struct *vma, unsigned long address, int flags)
vma              1134 mm/rmap.c      		__page_set_anon_rmap(page, vma, address,
vma              1137 mm/rmap.c      		__page_check_anon_rmap(page, vma, address);
vma              1152 mm/rmap.c      	struct vm_area_struct *vma, unsigned long address, bool compound)
vma              1156 mm/rmap.c      	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
vma              1170 mm/rmap.c      	__page_set_anon_rmap(page, vma, address, 1);
vma              1341 mm/rmap.c      static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
vma              1344 mm/rmap.c      	struct mm_struct *mm = vma->vm_mm;
vma              1347 mm/rmap.c      		.vma = vma,
vma              1357 mm/rmap.c      	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
vma              1365 mm/rmap.c      		split_huge_pmd_address(vma, address,
vma              1377 mm/rmap.c      	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
vma              1379 mm/rmap.c      				min(vma->vm_end, address + page_size(page)));
vma              1385 mm/rmap.c      		adjust_range_if_pmd_sharing_possible(vma, &range.start,
vma              1407 mm/rmap.c      			if (vma->vm_flags & VM_LOCKED) {
vma              1439 mm/rmap.c      				flush_cache_range(vma, range.start, range.end);
vma              1440 mm/rmap.c      				flush_tlb_range(vma, range.start, range.end);
vma              1492 mm/rmap.c      			if (ptep_clear_flush_young_notify(vma, address,
vma              1501 mm/rmap.c      		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
vma              1515 mm/rmap.c      			pteval = ptep_clear_flush(vma, address, pvmw.pte);
vma              1531 mm/rmap.c      						     vma_mmu_pagesize(vma));
vma              1537 mm/rmap.c      		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
vma              1557 mm/rmap.c      			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
vma              1623 mm/rmap.c      			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
vma              1674 mm/rmap.c      bool is_vma_temporary_stack(struct vm_area_struct *vma)
vma              1676 mm/rmap.c      	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
vma              1681 mm/rmap.c      	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
vma              1688 mm/rmap.c      static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
vma              1690 mm/rmap.c      	return is_vma_temporary_stack(vma);
vma              1833 mm/rmap.c      		struct vm_area_struct *vma = avc->vma;
vma              1834 mm/rmap.c      		unsigned long address = vma_address(page, vma);
vma              1838 mm/rmap.c      		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
vma              1841 mm/rmap.c      		if (!rwc->rmap_one(page, vma, address, rwc->arg))
vma              1869 mm/rmap.c      	struct vm_area_struct *vma;
vma              1886 mm/rmap.c      	vma_interval_tree_foreach(vma, &mapping->i_mmap,
vma              1888 mm/rmap.c      		unsigned long address = vma_address(page, vma);
vma              1892 mm/rmap.c      		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
vma              1895 mm/rmap.c      		if (!rwc->rmap_one(page, vma, address, rwc->arg))
vma              1934 mm/rmap.c      			    struct vm_area_struct *vma, unsigned long address)
vma              1936 mm/rmap.c      	struct anon_vma *anon_vma = vma->anon_vma;
vma              1944 mm/rmap.c      		__page_set_anon_rmap(page, vma, address, 0);
vma              1948 mm/rmap.c      			struct vm_area_struct *vma, unsigned long address)
vma              1950 mm/rmap.c      	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
vma              1952 mm/rmap.c      	__page_set_anon_rmap(page, vma, address, 1);
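The rmap walkers above (rmap_walk_anon over the anon_vma interval tree, rmap_walk_file over i_mmap) drive a caller-supplied rmap_walk_control: rmap_one() is invoked once per VMA that may map the page, and returning false stops the walk. A kernel-context sketch (not standalone; 'dump_one' and its printout are hypothetical):

/* Kernel-context sketch of the rmap_walk_control contract visible
 * in the walker loops indexed above. */
static bool dump_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	pr_info("page %p mapped at %#lx\n", page, address);
	return true;		/* keep walking the remaining VMAs */
}

static void dump_mappings(struct page *page)
{
	struct rmap_walk_control rwc = {
		.rmap_one = dump_one,
	};

	rmap_walk(page, &rwc);
}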
vma               144 mm/shmem.c     			     gfp_t gfp, struct vm_area_struct *vma,
vma               148 mm/shmem.c     		gfp_t gfp, struct vm_area_struct *vma,
vma               256 mm/shmem.c     bool vma_is_shmem(struct vm_area_struct *vma)
vma               258 mm/shmem.c     	return vma->vm_ops == &shmem_vm_ops;
vma               734 mm/shmem.c     unsigned long shmem_swap_usage(struct vm_area_struct *vma)
vma               736 mm/shmem.c     	struct inode *inode = file_inode(vma->vm_file);
vma               752 mm/shmem.c     	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
vma               757 mm/shmem.c     			linear_page_index(vma, vma->vm_start),
vma               758 mm/shmem.c     			linear_page_index(vma, vma->vm_end));
vma              1434 mm/shmem.c     static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
vma              1438 mm/shmem.c     	vma_init(vma, NULL);
vma              1440 mm/shmem.c     	vma->vm_pgoff = index + info->vfs_inode.i_ino;
vma              1441 mm/shmem.c     	vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
vma              1444 mm/shmem.c     static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
vma              1447 mm/shmem.c     	mpol_cond_put(vma->vm_policy);
vma              1458 mm/shmem.c     	vmf.vma = &pvma;
vma              1628 mm/shmem.c     			     gfp_t gfp, struct vm_area_struct *vma,
vma              1633 mm/shmem.c     	struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;
vma              1742 mm/shmem.c     	struct vm_area_struct *vma, struct vm_fault *vmf,
vma              1768 mm/shmem.c     	charge_mm = vma ? vma->vm_mm : current->mm;
vma              1773 mm/shmem.c     					  sgp, gfp, vma, fault_type);
vma              1802 mm/shmem.c     	if (vma && userfaultfd_missing(vma)) {
vma              1992 mm/shmem.c     	struct vm_area_struct *vma = vmf->vma;
vma              1993 mm/shmem.c     	struct inode *inode = file_inode(vma->vm_file);
vma              2060 mm/shmem.c     	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
vma              2061 mm/shmem.c     	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
vma              2063 mm/shmem.c     	else if (vma->vm_flags & VM_HUGEPAGE)
vma              2067 mm/shmem.c     				  gfp, vma, vmf, &ret);
vma              2163 mm/shmem.c     static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
vma              2165 mm/shmem.c     	struct inode *inode = file_inode(vma->vm_file);
vma              2166 mm/shmem.c     	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
vma              2169 mm/shmem.c     static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
vma              2172 mm/shmem.c     	struct inode *inode = file_inode(vma->vm_file);
vma              2175 mm/shmem.c     	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
vma              2208 mm/shmem.c     static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
vma              2217 mm/shmem.c     		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
vma              2227 mm/shmem.c     		if (vma->vm_flags & VM_SHARED)
vma              2228 mm/shmem.c     			vma->vm_flags &= ~(VM_MAYWRITE);
vma              2232 mm/shmem.c     	vma->vm_ops = &shmem_vm_ops;
vma              2234 mm/shmem.c     			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
vma              2235 mm/shmem.c     			(vma->vm_end & HPAGE_PMD_MASK)) {
vma              2236 mm/shmem.c     		khugepaged_enter(vma, vma->vm_flags);
vma              3990 mm/shmem.c     bool shmem_huge_enabled(struct vm_area_struct *vma)
vma              3992 mm/shmem.c     	struct inode *inode = file_inode(vma->vm_file);
vma              3997 mm/shmem.c     	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
vma              3998 mm/shmem.c     	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
vma              4010 mm/shmem.c     			off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
vma              4018 mm/shmem.c     			return (vma->vm_flags & VM_HUGEPAGE);
vma              4173 mm/shmem.c     int shmem_zero_setup(struct vm_area_struct *vma)
vma              4176 mm/shmem.c     	loff_t size = vma->vm_end - vma->vm_start;
vma              4184 mm/shmem.c     	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
vma              4188 mm/shmem.c     	if (vma->vm_file)
vma              4189 mm/shmem.c     		fput(vma->vm_file);
vma              4190 mm/shmem.c     	vma->vm_file = file;
vma              4191 mm/shmem.c     	vma->vm_ops = &shmem_vm_ops;
vma              4194 mm/shmem.c     			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
vma              4195 mm/shmem.c     			(vma->vm_end & HPAGE_PMD_MASK)) {
vma              4196 mm/shmem.c     		khugepaged_enter(vma, vma->vm_flags);
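The test guarding khugepaged_enter() in both shmem_mmap() and shmem_zero_setup() rounds vm_start up to the next PMD boundary and vm_end down to the previous one; only if rounding still leaves start below end can the mapping hold a whole huge page. A small userspace demonstration of that arithmetic, assuming the usual 2 MiB PMD size:

	#include <stdio.h>

	#define HPAGE_PMD_SIZE	(2UL << 20)
	#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

	int main(void)
	{
		unsigned long start = 0x00201000UL, end = 0x00801000UL;
		unsigned long first = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; /* round up   */
		unsigned long last  = end & HPAGE_PMD_MASK;                       /* round down */

		/* first = 0x400000, last = 0x800000: two full PMDs fit. */
		printf("first boundary 0x%lx, last 0x%lx -> %s\n", first, last,
		       first < last ? "room for a huge page" : "no room");
		return 0;
	}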
vma               456 mm/swap.c      					 struct vm_area_struct *vma)
vma               460 mm/swap.c      	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
vma                58 mm/swap_state.c #define GET_SWAP_RA_VAL(vma)					\
vma                59 mm/swap_state.c 	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
vma               310 mm/swap_state.c struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
vma               336 mm/swap_state.c 		if (vma && vma_ra) {
vma               340 mm/swap_state.c 			ra_val = GET_SWAP_RA_VAL(vma);
vma               345 mm/swap_state.c 			atomic_long_set(&vma->swap_readahead_info,
vma               351 mm/swap_state.c 			if (!vma || !vma_ra)
vma               360 mm/swap_state.c 			struct vm_area_struct *vma, unsigned long addr,
vma               398 mm/swap_state.c 			new_page = alloc_page_vma(gfp_mask, vma, addr);
vma               449 mm/swap_state.c 		struct vm_area_struct *vma, unsigned long addr, bool do_poll)
vma               453 mm/swap_state.c 			vma, addr, &page_was_allocated);
vma               550 mm/swap_state.c 	struct vm_area_struct *vma = vmf->vma;
vma               578 mm/swap_state.c 			gfp_mask, vma, addr, &page_allocated);
vma               594 mm/swap_state.c 	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
vma               627 mm/swap_state.c static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
vma               634 mm/swap_state.c 	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
vma               636 mm/swap_state.c 	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
vma               643 mm/swap_state.c 	struct vm_area_struct *vma = vmf->vma;
vma               670 mm/swap_state.c 	ra_val = GET_SWAP_RA_VAL(vma);
vma               676 mm/swap_state.c 	atomic_long_set(&vma->swap_readahead_info,
vma               686 mm/swap_state.c 		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
vma               688 mm/swap_state.c 		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
vma               692 mm/swap_state.c 		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
vma               726 mm/swap_state.c 	struct vm_area_struct *vma = vmf->vma;
vma               749 mm/swap_state.c 		page = __read_swap_cache_async(entry, gfp_mask, vma,
vma               765 mm/swap_state.c 	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
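Two details in the mm/swap_state.c hits are easy to misread. GET_SWAP_RA_VAL() uses GCC's omitted-middle conditional, x ? : y, which yields x when x is nonzero and y (a default of 4 here) otherwise. And swap_ra_clamp_pfn() bounds the readahead window three ways at once with max3()/min3(): it may not leave the VMA, and it may not leave the PMD-sized block around the faulting address. A hedged userspace rendering of the clamp, with made-up PFNs:

	#include <stdio.h>

	static unsigned long max3(unsigned long a, unsigned long b, unsigned long c)
	{
		unsigned long m = a > b ? a : b;
		return m > c ? m : c;
	}

	static unsigned long min3(unsigned long a, unsigned long b, unsigned long c)
	{
		unsigned long m = a < b ? a : b;
		return m < c ? m : c;
	}

	int main(void)
	{
		unsigned long vma_lo = 100, vma_hi = 200;	/* VMA bounds, in PFNs  */
		unsigned long pmd_lo = 0, pmd_hi = 512;		/* fault's PMD block    */
		unsigned long lpfn = 96, rpfn = 136;		/* raw window           */

		/* Prints [100, 136): the window was trimmed at the VMA start. */
		printf("clamped window: [%lu, %lu)\n",
		       max3(lpfn, vma_lo, pmd_lo), min3(rpfn, vma_hi, pmd_hi));
		return 0;
	}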
vma              1853 mm/swapfile.c  static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
vma              1863 mm/swapfile.c  	page = ksm_might_need_to_copy(page, vma, addr);
vma              1867 mm/swapfile.c  	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
vma              1873 mm/swapfile.c  	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
vma              1880 mm/swapfile.c  	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
vma              1881 mm/swapfile.c  	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
vma              1883 mm/swapfile.c  	set_pte_at(vma->vm_mm, addr, pte,
vma              1884 mm/swapfile.c  		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
vma              1886 mm/swapfile.c  		page_add_anon_rmap(page, vma, addr, false);
vma              1889 mm/swapfile.c  		page_add_new_anon_rmap(page, vma, addr, false);
vma              1891 mm/swapfile.c  		lru_cache_add_active_or_unevictable(page, vma);
vma              1909 mm/swapfile.c  static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
vma              1940 mm/swapfile.c  		vmf.vma = vma;
vma              1952 mm/swapfile.c  		ret = unuse_pte(vma, pmd, addr, entry, page);
vma              1977 mm/swapfile.c  static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
vma              1992 mm/swapfile.c  		ret = unuse_pte_range(vma, pmd, addr, next, type,
vma              2000 mm/swapfile.c  static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
vma              2014 mm/swapfile.c  		ret = unuse_pmd_range(vma, pud, addr, next, type,
vma              2022 mm/swapfile.c  static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
vma              2036 mm/swapfile.c  		ret = unuse_pud_range(vma, p4d, addr, next, type,
vma              2044 mm/swapfile.c  static int unuse_vma(struct vm_area_struct *vma, unsigned int type,
vma              2051 mm/swapfile.c  	addr = vma->vm_start;
vma              2052 mm/swapfile.c  	end = vma->vm_end;
vma              2054 mm/swapfile.c  	pgd = pgd_offset(vma->vm_mm, addr);
vma              2059 mm/swapfile.c  		ret = unuse_p4d_range(vma, pgd, addr, next, type,
vma              2070 mm/swapfile.c  	struct vm_area_struct *vma;
vma              2074 mm/swapfile.c  	for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma              2075 mm/swapfile.c  		if (vma->anon_vma) {
vma              2076 mm/swapfile.c  			ret = unuse_vma(vma, type, frontswap,
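unuse_vma() and its helpers are a textbook five-level page-table descent: pgd -> p4d -> pud -> pmd -> pte, each level iterating with its *_addr_end() helper and skipping empty entries. A skeleton of the outermost level, as a sketch of the pattern rather than of unuse_vma() itself:

	#include <linux/mm.h>
	#include <asm/pgtable.h>

	static void walk_range(struct mm_struct *mm, unsigned long addr,
			       unsigned long end)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		unsigned long next;

		do {
			next = pgd_addr_end(addr, end);	/* end of this pgd entry's span */
			if (pgd_none_or_clear_bad(pgd))
				continue;
			/* Descend with p4d_offset(pgd, addr), then pud_offset(),
			 * pmd_offset(), and pte_offset_map_lock() at the leaves. */
		} while (pgd++, addr = next, addr != end);
	}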
vma               273 mm/util.c      void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
vma               278 mm/util.c      	vma->vm_prev = prev;
vma               281 mm/util.c      		prev->vm_next = vma;
vma               283 mm/util.c      		mm->mmap = vma;
vma               290 mm/util.c      	vma->vm_next = next;
vma               292 mm/util.c      		next->vm_prev = vma;
vma               296 mm/util.c      int vma_is_stack_for_current(struct vm_area_struct *vma)
vma               300 mm/util.c      	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
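__vma_link_list() above is a plain doubly linked insert with mm->mmap as the list head; writing it out in userspace form makes the two cases (after prev, or at the head) explicit. A hedged analogue with a stand-in node type:

	#include <stddef.h>

	struct node {
		struct node *prev, *next;
	};

	/* Mirror of __vma_link_list(): insert 'n' after 'prev', or at the
	 * head when prev is NULL; 'first' plays the role of mm->mmap. */
	static void link_after(struct node **first, struct node *n, struct node *prev)
	{
		struct node *next;

		n->prev = prev;
		if (prev) {
			next = prev->next;
			prev->next = n;
		} else {
			next = *first;
			*first = n;
		}
		n->next = next;
		if (next)
			next->prev = n;
	}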
vma                73 mm/vmacache.c  		struct vm_area_struct *vma = current->vmacache.vmas[idx];
vma                75 mm/vmacache.c  		if (vma) {
vma                77 mm/vmacache.c  			if (WARN_ON_ONCE(vma->vm_mm != mm))
vma                80 mm/vmacache.c  			if (vma->vm_start <= addr && vma->vm_end > addr) {
vma                82 mm/vmacache.c  				return vma;
vma               106 mm/vmacache.c  		struct vm_area_struct *vma = current->vmacache.vmas[idx];
vma               108 mm/vmacache.c  		if (vma && vma->vm_start == start && vma->vm_end == end) {
vma               110 mm/vmacache.c  			return vma;
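The mm/vmacache.c hits implement a tiny per-thread, direct-mapped cache consulted before the full VMA lookup; a hit is the same half-open containment test (vm_start <= addr < vm_end) seen above. A userspace sketch of the idea; the 4-slot count and page-granular hash are my reading of linux/vmacache.h and should be treated as assumptions:

	#include <stddef.h>

	#define PAGE_SHIFT	12
	#define NR_SLOTS	4	/* assumption: mirrors VMACACHE_SIZE */

	struct range { unsigned long start, end; };	/* stand-in for a VMA */

	static struct range *slots[NR_SLOTS];

	static struct range *cache_find(unsigned long addr)
	{
		struct range *r = slots[(addr >> PAGE_SHIFT) & (NR_SLOTS - 1)];

		/* Same half-open containment test as vmacache_find() above. */
		if (r && r->start <= addr && r->end > addr)
			return r;
		return NULL;	/* miss: fall back to the full lookup */
	}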
vma              2992 mm/vmalloc.c   int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
vma              3024 mm/vmalloc.c   		ret = vm_insert_page(vma, uaddr, page);
vma              3033 mm/vmalloc.c   	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma              3053 mm/vmalloc.c   int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
vma              3056 mm/vmalloc.c   	return remap_vmalloc_range_partial(vma, vma->vm_start,
vma              3058 mm/vmalloc.c   					   vma->vm_end - vma->vm_start);
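remap_vmalloc_range() gives drivers a one-call way to expose a vmalloc'd buffer to userspace; as the partial variant above shows, it inserts the pages one by one and tags the VMA VM_DONTEXPAND | VM_DONTDUMP. A sketch of the usual call site, assuming a hypothetical my_buf allocated with vmalloc_user() (plain vmalloc() memory is refused):

	#include <linux/fs.h>
	#include <linux/vmalloc.h>

	static void *my_buf;	/* hypothetical; allocated with vmalloc_user() */

	static int my_mmap(struct file *file, struct vm_area_struct *vma)
	{
		/* The last argument is a page offset into the buffer; 0 maps
		 * it from the start. Fails if the window overruns the buffer. */
		return remap_vmalloc_range(vma, my_buf, 0);
	}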
vma              2730 net/core/sock.c int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
vma              1732 net/ipv4/tcp.c 	     struct vm_area_struct *vma)
vma              1734 net/ipv4/tcp.c 	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
vma              1736 net/ipv4/tcp.c 	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
vma              1739 net/ipv4/tcp.c 	vma->vm_flags |= VM_MIXEDMAP;
vma              1741 net/ipv4/tcp.c 	vma->vm_ops = &tcp_vm_ops;
vma              1752 net/ipv4/tcp.c 	struct vm_area_struct *vma;
vma              1768 net/ipv4/tcp.c 	vma = find_vma(current->mm, address);
vma              1769 net/ipv4/tcp.c 	if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops) {
vma              1773 net/ipv4/tcp.c 	zc->length = min_t(unsigned long, zc->length, vma->vm_end - address);
vma              1781 net/ipv4/tcp.c 		zap_page_range(vma, address, zc->length);
vma              1819 net/ipv4/tcp.c 		ret = vm_insert_page(vma, address + length,
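The net/ipv4/tcp.c hits are the kernel half of TCP zerocopy receive: tcp_mmap() only accepts a read-only mapping, and the getsockopt() path zaps the range and inserts payload pages with vm_insert_page(). A hedged userspace sketch of the other half; the struct layout matches the original 4.18-era UAPI and may differ on other kernels:

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <linux/tcp.h>		/* struct tcp_zerocopy_receive */

	static int zc_read(int fd, size_t chunk)	/* fd: connected TCP socket */
	{
		struct tcp_zerocopy_receive zc;
		socklen_t len = sizeof(zc);
		void *addr = mmap(NULL, chunk, PROT_READ, MAP_SHARED, fd, 0);

		if (addr == MAP_FAILED)
			return -1;
		memset(&zc, 0, sizeof(zc));
		zc.address = (__u64)(unsigned long)addr;
		zc.length = chunk;
		/* On success the payload pages are now mapped at addr. */
		if (getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &len) == 0)
			printf("mapped %u bytes; read %u more via recv()\n",
			       zc.length, zc.recv_skip_hint);
		munmap(addr, chunk);
		return 0;
	}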
vma              4192 net/packet/af_packet.c static void packet_mm_open(struct vm_area_struct *vma)
vma              4194 net/packet/af_packet.c 	struct file *file = vma->vm_file;
vma              4202 net/packet/af_packet.c static void packet_mm_close(struct vm_area_struct *vma)
vma              4204 net/packet/af_packet.c 	struct file *file = vma->vm_file;
vma              4452 net/packet/af_packet.c 		struct vm_area_struct *vma)
vma              4462 net/packet/af_packet.c 	if (vma->vm_pgoff)
vma              4479 net/packet/af_packet.c 	size = vma->vm_end - vma->vm_start;
vma              4483 net/packet/af_packet.c 	start = vma->vm_start;
vma              4495 net/packet/af_packet.c 				err = vm_insert_page(vma, start, page);
vma              4505 net/packet/af_packet.c 	vma->vm_ops = &packet_mmap_ops;
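packet_mmap() above maps the RX/TX rings that userspace configures with setsockopt(); note it rejects any nonzero vm_pgoff, so the ring is always mapped from offset 0. A sketch of the userspace side with arbitrary ring geometry, assuming fd came from socket(AF_PACKET, SOCK_RAW, ...):

	#include <sys/mman.h>
	#include <sys/socket.h>
	#include <linux/if_packet.h>

	static void *map_rx_ring(int fd, struct tpacket_req *req)
	{
		req->tp_block_size = 4096;
		req->tp_frame_size = 2048;
		req->tp_block_nr   = 64;
		req->tp_frame_nr   = (req->tp_block_size / req->tp_frame_size)
					* req->tp_block_nr;
		if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req)))
			return MAP_FAILED;
		/* Offset must be 0: packet_mmap() refuses vm_pgoff != 0. */
		return mmap(NULL, (size_t)req->tp_block_size * req->tp_block_nr,
			    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	}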
vma               114 net/socket.c   static int sock_mmap(struct file *file, struct vm_area_struct *vma);
vma              1259 net/socket.c   static int sock_mmap(struct file *file, struct vm_area_struct *vma)
vma              1263 net/socket.c   	return sock->ops->mmap(file, sock, vma);
vma               963 net/xdp/xsk.c  		    struct vm_area_struct *vma)
vma               965 net/xdp/xsk.c  	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
vma               966 net/xdp/xsk.c  	unsigned long size = vma->vm_end - vma->vm_start;
vma              1003 net/xdp/xsk.c  	return remap_pfn_range(vma, vma->vm_start, pfn,
vma              1004 net/xdp/xsk.c  			       size, vma->vm_page_prot);
vma               750 samples/vfio-mdev/mbochs.c 	struct vm_area_struct *vma = vmf->vma;
vma               751 samples/vfio-mdev/mbochs.c 	struct mdev_state *mdev_state = vma->vm_private_data;
vma               752 samples/vfio-mdev/mbochs.c 	pgoff_t page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
vma               768 samples/vfio-mdev/mbochs.c static int mbochs_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
vma               772 samples/vfio-mdev/mbochs.c 	if (vma->vm_pgoff != MBOCHS_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
vma               774 samples/vfio-mdev/mbochs.c 	if (vma->vm_end < vma->vm_start)
vma               776 samples/vfio-mdev/mbochs.c 	if (vma->vm_end - vma->vm_start > mdev_state->memsize)
vma               778 samples/vfio-mdev/mbochs.c 	if ((vma->vm_flags & VM_SHARED) == 0)
vma               781 samples/vfio-mdev/mbochs.c 	vma->vm_ops = &mbochs_region_vm_ops;
vma               782 samples/vfio-mdev/mbochs.c 	vma->vm_private_data = mdev_state;
vma               788 samples/vfio-mdev/mbochs.c 	struct vm_area_struct *vma = vmf->vma;
vma               789 samples/vfio-mdev/mbochs.c 	struct mbochs_dmabuf *dmabuf = vma->vm_private_data;
vma               803 samples/vfio-mdev/mbochs.c static int mbochs_mmap_dmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
vma               810 samples/vfio-mdev/mbochs.c 	if ((vma->vm_flags & VM_SHARED) == 0)
vma               813 samples/vfio-mdev/mbochs.c 	vma->vm_ops = &mbochs_dmabuf_vm_ops;
vma               814 samples/vfio-mdev/mbochs.c 	vma->vm_private_data = dmabuf;
vma               407 samples/vfio-mdev/mdpy.c static int mdpy_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
vma               411 samples/vfio-mdev/mdpy.c 	if (vma->vm_pgoff != MDPY_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
vma               413 samples/vfio-mdev/mdpy.c 	if (vma->vm_end < vma->vm_start)
vma               415 samples/vfio-mdev/mdpy.c 	if (vma->vm_end - vma->vm_start > mdev_state->memsize)
vma               417 samples/vfio-mdev/mdpy.c 	if ((vma->vm_flags & VM_SHARED) == 0)
vma               420 samples/vfio-mdev/mdpy.c 	return remap_vmalloc_range_partial(vma, vma->vm_start,
vma               422 samples/vfio-mdev/mdpy.c 					   vma->vm_end - vma->vm_start);
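Both mdev samples split mmap into a cheap setup step (validate the request, install vm_ops) and a .fault handler that resolves pages lazily from the faulting address. A sketch of that pattern; my_lookup_page is hypothetical:

	#include <linux/mm.h>

	static struct page *my_lookup_page(void *priv, pgoff_t pgoff); /* hypothetical */

	static vm_fault_t my_fault(struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;
		pgoff_t pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
		struct page *page = my_lookup_page(vma->vm_private_data, pgoff);

		if (!page)
			return VM_FAULT_SIGBUS;
		get_page(page);		/* reference dropped when the PTE goes away */
		vmf->page = page;
		return 0;
	}

	static const struct vm_operations_struct my_vm_ops = {
		.fault = my_fault,
	};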
vma               509 security/apparmor/lsm.c static int apparmor_file_mprotect(struct vm_area_struct *vma,
vma               512 security/apparmor/lsm.c 	return common_mmap(OP_FMPROT, vma->vm_file, prot,
vma               513 security/apparmor/lsm.c 			   !(vma->vm_flags & VM_SHARED) ? MAP_PRIVATE : 0);
vma              1461 security/security.c int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
vma              1464 security/security.c 	return call_int_hook(file_mprotect, 0, vma, reqprot, prot);
vma              3727 security/selinux/hooks.c static int selinux_file_mprotect(struct vm_area_struct *vma,
vma              3738 security/selinux/hooks.c 	    (prot & PROT_EXEC) && !(vma->vm_flags & VM_EXEC)) {
vma              3740 security/selinux/hooks.c 		if (vma->vm_start >= vma->vm_mm->start_brk &&
vma              3741 security/selinux/hooks.c 		    vma->vm_end <= vma->vm_mm->brk) {
vma              3745 security/selinux/hooks.c 		} else if (!vma->vm_file &&
vma              3746 security/selinux/hooks.c 			   ((vma->vm_start <= vma->vm_mm->start_stack &&
vma              3747 security/selinux/hooks.c 			     vma->vm_end >= vma->vm_mm->start_stack) ||
vma              3748 security/selinux/hooks.c 			    vma_is_stack_for_current(vma))) {
vma              3752 security/selinux/hooks.c 		} else if (vma->vm_file && vma->anon_vma) {
vma              3760 security/selinux/hooks.c 			rc = file_has_perm(cred, vma->vm_file, FILE__EXECMOD);
vma              3766 security/selinux/hooks.c 	return file_map_prot_check(vma->vm_file, prot, vma->vm_flags&VM_SHARED);
vma               246 security/selinux/selinuxfs.c 				  struct vm_area_struct *vma)
vma               249 security/selinux/selinuxfs.c 	unsigned long	size = vma->vm_end - vma->vm_start;
vma               254 security/selinux/selinuxfs.c 	if (vma->vm_pgoff > 0 || size != PAGE_SIZE)
vma               257 security/selinux/selinuxfs.c 	if (vma->vm_flags & VM_WRITE)
vma               260 security/selinux/selinuxfs.c 	vma->vm_flags &= ~VM_MAYWRITE;
vma               262 security/selinux/selinuxfs.c 	return remap_pfn_range(vma, vma->vm_start,
vma               264 security/selinux/selinuxfs.c 			       size, vma->vm_page_prot);
vma               457 security/selinux/selinuxfs.c 	struct policy_load_memory *plm = vmf->vma->vm_file->private_data;
vma               481 security/selinux/selinuxfs.c static int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma)
vma               483 security/selinux/selinuxfs.c 	if (vma->vm_flags & VM_SHARED) {
vma               485 security/selinux/selinuxfs.c 		vma->vm_flags &= ~VM_MAYWRITE;
vma               487 security/selinux/selinuxfs.c 		if (vma->vm_flags & VM_WRITE)
vma               491 security/selinux/selinuxfs.c 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma               492 security/selinux/selinuxfs.c 	vma->vm_ops = &sel_mmap_policy_ops;
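sel_mmap_handle_status() and sel_mmap_policy() share one idiom worth naming: rejecting VM_WRITE stops a writable mapping now, but only clearing VM_MAYWRITE stops a later mprotect(PROT_WRITE) from re-enabling writes. Condensed into one helper, as a sketch:

	#include <linux/mm.h>

	static int ro_mmap_prep(struct vm_area_struct *vma)
	{
		if (vma->vm_flags & VM_WRITE)
			return -EPERM;
		vma->vm_flags &= ~VM_MAYWRITE;			/* no mprotect upgrade   */
		vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;	/* fixed size, no dumps  */
		return 0;
	}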
vma               135 sound/arm/pxa2xx-pcm-lib.c 	struct vm_area_struct *vma)
vma               138 sound/arm/pxa2xx-pcm-lib.c 	return dma_mmap_wc(substream->pcm->card->dev, vma, runtime->dma_area,
vma               382 sound/core/compress_offload.c static int snd_compr_mmap(struct file *f, struct vm_area_struct *vma)
vma               248 sound/core/hwdep.c static int snd_hwdep_mmap(struct file * file, struct vm_area_struct * vma)
vma               252 sound/core/hwdep.c 		return hw->ops.mmap(hw, file, vma);
vma               220 sound/core/info.c static int snd_info_entry_mmap(struct file *file, struct vm_area_struct *vma)
vma               233 sound/core/info.c 				  inode, file, vma);
vma               337 sound/core/init.c static int snd_disconnect_mmap(struct file *file, struct vm_area_struct *vma)
vma              3241 sound/core/pcm_native.c 	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
vma              3277 sound/core/pcm_native.c 	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
vma              3366 sound/core/pcm_native.c 	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
vma              1057 sound/soc/amd/acp-pcm-dma.c 			struct vm_area_struct *vma)
vma              1059 sound/soc/amd/acp-pcm-dma.c 	return snd_pcm_lib_default_mmap(substream, vma);
vma               398 sound/soc/amd/raven/acp3x-pcm-dma.c 			  struct vm_area_struct *vma)
vma               400 sound/soc/amd/raven/acp3x-pcm-dma.c 	return snd_pcm_lib_default_mmap(substream, vma);
vma                60 sound/soc/atmel/atmel-pcm-pdc.c 	struct vm_area_struct *vma)
vma                62 sound/soc/atmel/atmel-pcm-pdc.c 	return remap_pfn_range(vma, vma->vm_start,
vma                64 sound/soc/atmel/atmel-pcm-pdc.c 		       vma->vm_end - vma->vm_start, vma->vm_page_prot);
vma               210 sound/soc/fsl/imx-pcm-fiq.c 		struct vm_area_struct *vma)
vma               215 sound/soc/fsl/imx-pcm-fiq.c 	ret = dma_mmap_wc(substream->pcm->card->dev, vma, runtime->dma_area,
vma               298 sound/soc/intel/baytrail/sst-baytrail-pcm.c 			    struct vm_area_struct *vma)
vma               303 sound/soc/intel/baytrail/sst-baytrail-pcm.c 	return snd_pcm_lib_default_mmap(substream, vma);
vma               121 sound/soc/pxa/mmp-pcm.c 			 struct vm_area_struct *vma)
vma               124 sound/soc/pxa/mmp-pcm.c 	unsigned long off = vma->vm_pgoff;
vma               126 sound/soc/pxa/mmp-pcm.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               127 sound/soc/pxa/mmp-pcm.c 	return remap_pfn_range(vma, vma->vm_start,
vma               129 sound/soc/pxa/mmp-pcm.c 		vma->vm_end - vma->vm_start, vma->vm_page_prot);
vma               399 sound/soc/qcom/lpass-platform.c 		struct vm_area_struct *vma)
vma               403 sound/soc/qcom/lpass-platform.c 	return dma_mmap_coherent(substream->pcm->card->dev, vma,
vma               451 sound/soc/qcom/qdsp6/q6asm-dai.c 				struct vm_area_struct *vma)
vma               459 sound/soc/qcom/qdsp6/q6asm-dai.c 	return dma_mmap_coherent(dev, vma,
vma               750 sound/soc/qcom/qdsp6/q6asm-dai.c 		struct vm_area_struct *vma)
vma               758 sound/soc/qcom/qdsp6/q6asm-dai.c 	return dma_mmap_coherent(dev, vma,
vma               237 sound/soc/samsung/idma.c 	struct vm_area_struct *vma)
vma               244 sound/soc/samsung/idma.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma               245 sound/soc/samsung/idma.c 	size = vma->vm_end - vma->vm_start;
vma               246 sound/soc/samsung/idma.c 	offset = vma->vm_pgoff << PAGE_SHIFT;
vma               247 sound/soc/samsung/idma.c 	ret = io_remap_pfn_range(vma, vma->vm_start,
vma               249 sound/soc/samsung/idma.c 			size, vma->vm_page_prot);
vma               511 sound/soc/soc-component.c 			       struct vm_area_struct *vma)
vma               523 sound/soc/soc-component.c 			return component->driver->ops->mmap(substream, vma);
vma               448 sound/soc/sprd/sprd-pcm-dma.c 			 struct vm_area_struct *vma)
vma               452 sound/soc/sprd/sprd-pcm-dma.c 	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma               453 sound/soc/sprd/sprd-pcm-dma.c 	return remap_pfn_range(vma, vma->vm_start,
vma               455 sound/soc/sprd/sprd-pcm-dma.c 			       vma->vm_end - vma->vm_start,
vma               456 sound/soc/sprd/sprd-pcm-dma.c 			       vma->vm_page_prot);
vma               208 sound/soc/uniphier/aio-dma.c 				struct vm_area_struct *vma)
vma               210 sound/soc/uniphier/aio-dma.c 	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma               212 sound/soc/uniphier/aio-dma.c 	return remap_pfn_range(vma, vma->vm_start,
vma               214 sound/soc/uniphier/aio-dma.c 			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
vma               134 sound/usb/usx2y/us122l.c 	struct us122l *us122l = vmf->vma->vm_private_data;
vma                28 sound/usb/usx2y/usX2Yhwdep.c 		   vmf->vma->vm_start,
vma                32 sound/usb/usx2y/usX2Yhwdep.c 	vaddr = (char *)((struct usX2Ydev *)vmf->vma->vm_private_data)->us428ctls_sharedmem + offset;
vma               651 sound/usb/usx2y/usx2yhwdeppcm.c 	vaddr = (char *)((struct usX2Ydev *)vmf->vma->vm_private_data)->hwdep_pcm_shm + offset;
vma              1278 sound/x86/intel_hdmi_audio.c 			struct vm_area_struct *vma)
vma              1280 sound/x86/intel_hdmi_audio.c 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma              1281 sound/x86/intel_hdmi_audio.c 	return remap_pfn_range(vma, vma->vm_start,
vma              1283 sound/x86/intel_hdmi_audio.c 			vma->vm_end - vma->vm_start, vma->vm_page_prot);
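Nearly every sound/ hit above is the same job: hand a DMA buffer to userspace from a PCM mmap callback. The drivers calling dma_mmap_wc()/dma_mmap_coherent() let the DMA API pick the right attributes for the platform, which is generally preferable to the open-coded pgprot + remap_pfn_range() variants also visible above. The common shape, as a sketch:

	#include <linux/dma-mapping.h>
	#include <sound/pcm.h>

	static int my_pcm_mmap(struct snd_pcm_substream *substream,
			       struct vm_area_struct *vma)
	{
		struct snd_pcm_runtime *runtime = substream->runtime;

		return dma_mmap_wc(substream->pcm->card->dev, vma,
				   runtime->dma_area, runtime->dma_addr,
				   runtime->dma_bytes);
	}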
vma               360 tools/perf/jvmti/jvmti_agent.c 	uint64_t vma, void const *code, unsigned int const size)
vma               384 tools/perf/jvmti/jvmti_agent.c 	rec.vma        = vma;
vma               385 tools/perf/jvmti/jvmti_agent.c 	rec.code_addr  = vma;
vma                23 tools/perf/jvmti/jvmti_agent.h 		       uint64_t vma, void const *code,
vma                57 tools/perf/util/jitdump.c 	unsigned long vma;
vma               330 tools/perf/util/jitdump.c 			jr->load.vma       = bswap_64(jr->load.vma);
vma               341 tools/perf/util/jitdump.c 			jr->move.vma           = bswap_64(jr->move.vma);
vma                70 tools/perf/util/jitdump.h 	uint64_t vma;
vma                85 tools/perf/util/jitdump.h 	uint64_t vma;
vma               193 tools/perf/util/srcline.c 	bfd_vma pc, vma;
vma               211 tools/perf/util/srcline.c 	vma = bfd_get_section_vma(abfd, section);
vma               213 tools/perf/util/srcline.c 	vma = bfd_section_vma(section);
vma               221 tools/perf/util/srcline.c 	if (pc < vma || pc >= vma + size)
vma               224 tools/perf/util/srcline.c 	a2l->found = bfd_find_nearest_line(abfd, section, a2l->syms, pc - vma,
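The srcline.c hits show the core of address-to-line resolution: a program counter belongs to a section only if it lies in [vma, vma + size), and the lookup then works with the section-relative offset. Reduced to the bare check:

	#include <stdbool.h>
	#include <stdint.h>

	/* True when pc falls inside a section loaded at 'vma' spanning
	 * 'size' bytes; *offset is what bfd_find_nearest_line() then takes. */
	static bool section_contains(uint64_t pc, uint64_t vma, uint64_t size,
				     uint64_t *offset)
	{
		if (pc < vma || pc >= vma + size)
			return false;
		*offset = pc - vma;
		return true;
	}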
vma               944 virt/kvm/arm/mmu.c 		struct vm_area_struct *vma = find_vma(current->mm, hva);
vma               947 virt/kvm/arm/mmu.c 		if (!vma || vma->vm_start >= reg_end)
vma               953 virt/kvm/arm/mmu.c 		vm_start = max(hva, vma->vm_start);
vma               954 virt/kvm/arm/mmu.c 		vm_end = min(reg_end, vma->vm_end);
vma               956 virt/kvm/arm/mmu.c 		if (!(vma->vm_flags & VM_PFNMAP)) {
vma              1600 virt/kvm/arm/mmu.c 				     struct vm_area_struct *vma)
vma              1604 virt/kvm/arm/mmu.c 	if (is_vm_hugetlb_page(vma))
vma              1605 virt/kvm/arm/mmu.c 		lsb = huge_page_shift(hstate_vma(vma));
vma              1680 virt/kvm/arm/mmu.c 	struct vm_area_struct *vma;
vma              1697 virt/kvm/arm/mmu.c 	vma = find_vma_intersection(current->mm, hva, hva + 1);
vma              1698 virt/kvm/arm/mmu.c 	if (unlikely(!vma)) {
vma              1704 virt/kvm/arm/mmu.c 	vma_pagesize = vma_kernel_pagesize(vma);
vma              1706 virt/kvm/arm/mmu.c 	    (vma->vm_flags & VM_PFNMAP) ||
vma              1721 virt/kvm/arm/mmu.c 		gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
vma              1744 virt/kvm/arm/mmu.c 		kvm_send_hwpoison_signal(hva, vma);
vma              2312 virt/kvm/arm/mmu.c 		struct vm_area_struct *vma = find_vma(current->mm, hva);
vma              2315 virt/kvm/arm/mmu.c 		if (!vma || vma->vm_start >= reg_end)
vma              2322 virt/kvm/arm/mmu.c 		if (writable && !(vma->vm_flags & VM_WRITE)) {
vma              2330 virt/kvm/arm/mmu.c 		vm_start = max(hva, vma->vm_start);
vma              2331 virt/kvm/arm/mmu.c 		vm_end = min(reg_end, vma->vm_end);
vma              2333 virt/kvm/arm/mmu.c 		if (vma->vm_flags & VM_PFNMAP) {
vma              2338 virt/kvm/arm/mmu.c 			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
vma              2339 virt/kvm/arm/mmu.c 			pa += vm_start - vma->vm_start;
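find_vma_intersection(mm, hva, hva + 1) is the standard "which VMA contains this address?" probe (the caller holds mmap_sem), and for a VM_PFNMAP mapping the backing physical address falls out of vm_pgoff plus the offset into the VMA, exactly as computed above. As one helper, a sketch:

	#include <linux/mm.h>

	/* Caller must hold mmap_sem. Returns 0 when no VM_PFNMAP VMA covers
	 * 'addr' (a simplification for the sketch). */
	static phys_addr_t hva_to_pa_pfnmap(struct mm_struct *mm, unsigned long addr)
	{
		struct vm_area_struct *vma = find_vma_intersection(mm, addr, addr + 1);

		if (!vma || !(vma->vm_flags & VM_PFNMAP))
			return 0;
		return ((phys_addr_t)vma->vm_pgoff << PAGE_SHIFT)
			+ (addr - vma->vm_start);
	}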
vma              1405 virt/kvm/kvm_main.c 	struct vm_area_struct *vma;
vma              1415 virt/kvm/kvm_main.c 	vma = find_vma(current->mm, addr);
vma              1416 virt/kvm/kvm_main.c 	if (!vma)
vma              1419 virt/kvm/kvm_main.c 	size = vma_kernel_pagesize(vma);
vma              1583 virt/kvm/kvm_main.c static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
vma              1585 virt/kvm/kvm_main.c 	if (unlikely(!(vma->vm_flags & VM_READ)))
vma              1588 virt/kvm/kvm_main.c 	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
vma              1594 virt/kvm/kvm_main.c static int hva_to_pfn_remapped(struct vm_area_struct *vma,
vma              1602 virt/kvm/kvm_main.c 	r = follow_pfn(vma, addr, &pfn);
vma              1617 virt/kvm/kvm_main.c 		r = follow_pfn(vma, addr, &pfn);
vma              1660 virt/kvm/kvm_main.c 	struct vm_area_struct *vma;
vma              1685 virt/kvm/kvm_main.c 	vma = find_vma_intersection(current->mm, addr, addr + 1);
vma              1687 virt/kvm/kvm_main.c 	if (vma == NULL)
vma              1689 virt/kvm/kvm_main.c 	else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
vma              1690 virt/kvm/kvm_main.c 		r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn);
vma              1696 virt/kvm/kvm_main.c 		if (async && vma_is_valid(vma, write_fault))
vma              2713 virt/kvm/kvm_main.c 	struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
vma              2737 virt/kvm/kvm_main.c static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
vma              2739 virt/kvm/kvm_main.c 	vma->vm_ops = &kvm_vcpu_vm_ops;
vma              3123 virt/kvm/kvm_main.c static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
vma              3128 virt/kvm/kvm_main.c 		return dev->ops->mmap(dev, vma);
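hva_to_pfn_remapped() above relies on a probe-fault-retry dance: follow_pfn() fails on a not-yet-populated VM_IO/VM_PFNMAP mapping (get_user_pages() will not fault those in), so the code faults the address in with fixup_user_fault() and probes again. A sketch of that dance; the fixup_user_fault() signature matches this kernel generation and is an assumption:

	#include <linux/mm.h>
	#include <linux/sched.h>

	static int probe_pfn(struct vm_area_struct *vma, unsigned long addr,
			     bool write, unsigned long *pfn)
	{
		int r = follow_pfn(vma, addr, pfn);

		if (r) {
			bool unlocked = false;

			r = fixup_user_fault(current, current->mm, addr,
					     write ? FAULT_FLAG_WRITE : 0,
					     &unlocked);
			if (r)
				return r;
			r = follow_pfn(vma, addr, pfn);	/* retry once */
		}
		return r;
	}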